diff --git a/CONTRIBUTING-ZH.md b/CONTRIBUTING-ZH.md index b96cb86f..91524fe7 100644 --- a/CONTRIBUTING-ZH.md +++ b/CONTRIBUTING-ZH.md @@ -123,6 +123,7 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package | PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 | | FLOWISE_USERNAME | 登录用户名 | 字符串 | | | FLOWISE_PASSWORD | 登录密码 | 字符串 | | +| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb | | DEBUG | 打印组件的日志 | 布尔值 | | | BLOB_STORAGE_PATH | 存储位置 | 字符串 | `your-home-dir/.flowise/storage` | | LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4d90a695..f0f83abd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -127,6 +127,7 @@ Flowise support different environment variables to configure your instance. You | IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | | | FLOWISE_USERNAME | Username to login | String | | | FLOWISE_PASSWORD | Password to login | String | | +| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb | | DEBUG | Print logs from components | Boolean | | | BLOB_STORAGE_PATH | Location where uploaded files are stored | String | `your-home-dir/.flowise/storage` | | LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` | diff --git a/docker/.env.example b/docker/.env.example index 18415673..ee972c9a 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -22,6 +22,7 @@ BLOB_STORAGE_PATH=/root/.flowise/storage # FLOWISE_USERNAME=user # FLOWISE_PASSWORD=1234 # FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey +# FLOWISE_FILE_SIZE_LIMIT=50mb # DEBUG=true # LOG_LEVEL=debug (error | warn | info | verbose | debug) # TOOL_FUNCTION_BUILTIN_DEP=crypto,fs diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index cb45f37c..4bee2e39 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -10,6 +10,7 @@ services: - IFRAME_ORIGINS=${IFRAME_ORIGINS} - FLOWISE_USERNAME=${FLOWISE_USERNAME} - FLOWISE_PASSWORD=${FLOWISE_PASSWORD} + - 
FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT} - DEBUG=${DEBUG} - DATABASE_PATH=${DATABASE_PATH} - DATABASE_TYPE=${DATABASE_TYPE} diff --git a/package.json b/package.json index 451f7855..5f5f5812 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.12", + "version": "1.5.0", "private": true, "homepage": "https://flowiseai.com", "workspaces": [ diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts new file mode 100644 index 00000000..e570b263 --- /dev/null +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts @@ -0,0 +1,135 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex' + +interface AzureOpenAIConfig { + apiKey?: string + endpoint?: string + apiVersion?: string + deploymentName?: string +} + +class AzureChatOpenAI_LlamaIndex_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'AzureChatOpenAI' + this.name = 'azureChatOpenAI_LlamaIndex' + this.version = 1.0 + this.type = 'AzureChatOpenAI' + this.icon = 'Azure.svg' + this.category = 'Chat Models' + this.description = 'Wrapper around Azure OpenAI Chat LLM specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['azureOpenAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 
'modelName', + type: 'options', + options: [ + { + label: 'gpt-4', + name: 'gpt-4' + }, + { + label: 'gpt-4-32k', + name: 'gpt-4-32k' + }, + { + label: 'gpt-3.5-turbo', + name: 'gpt-3.5-turbo' + }, + { + label: 'gpt-3.5-turbo-16k', + name: 'gpt-3.5-turbo-16k' + } + ], + default: 'gpt-3.5-turbo-16k', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS + const temperature = nodeData.inputs?.temperature as string + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData) + const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData) + const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) + const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) + + const obj: Partial & { azure?: AzureOpenAIConfig } = { + temperature: parseFloat(temperature), + model: modelName, + azure: { + apiKey: azureOpenAIApiKey, + endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`, + apiVersion: azureOpenAIApiVersion, + deploymentName: azureOpenAIApiDeploymentName + } + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAI(obj) + return model + } +} + +module.exports = { nodeClass: AzureChatOpenAI_LlamaIndex_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts new file mode 100644 index 00000000..69a15114 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts @@ -0,0 +1,104 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { Anthropic } from 'llamaindex' + +class ChatAnthropic_LlamaIndex_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + tags: string[] + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatAnthropic' + this.name = 'chatAnthropic_LlamaIndex' + this.version = 1.0 + this.type = 
'ChatAnthropic' + this.icon = 'Anthropic.svg' + this.category = 'Chat Models' + this.description = 'Wrapper around ChatAnthropic LLM specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(Anthropic)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['anthropicApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'claude-2', + name: 'claude-2', + description: 'Claude 2 latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-instant-1', + name: 'claude-instant-1', + description: 'Claude Instant latest major version, automatically get updates to the model as they are released' + } + ], + default: 'claude-2', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokensToSample', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top P', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as 'claude-2' | 'claude-instant-1' | undefined + const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string + const topP = nodeData.inputs?.topP as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData) + + const obj: Partial = { + temperature: parseFloat(temperature), + model: modelName, + apiKey: anthropicApiKey + } + + if (maxTokensToSample) obj.maxTokens = parseInt(maxTokensToSample, 10) + if (topP) obj.topP = parseFloat(topP) + + const model = new Anthropic(obj) + return model + } +} + +module.exports = { nodeClass: ChatAnthropic_LlamaIndex_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts new file mode 100644 index 00000000..8b3567a6 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts @@ -0,0 +1,156 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex' + +class ChatOpenAI_LlamaIndex_LLMs implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatOpenAI' + this.name = 'chatOpenAI_LlamaIndex' + this.version = 1.0 + this.type = 'ChatOpenAI' + this.icon = 'openai.svg' + this.category = 'Chat Models' + this.description = 'Wrapper around OpenAI Chat LLM specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'gpt-4', + name: 'gpt-4' + }, + { + label: 'gpt-4-turbo-preview', + 
name: 'gpt-4-turbo-preview' + }, + { + label: 'gpt-4-0125-preview', + name: 'gpt-4-0125-preview' + }, + { + label: 'gpt-4-1106-preview', + name: 'gpt-4-1106-preview' + }, + { + label: 'gpt-4-vision-preview', + name: 'gpt-4-vision-preview' + }, + { + label: 'gpt-4-0613', + name: 'gpt-4-0613' + }, + { + label: 'gpt-4-32k', + name: 'gpt-4-32k' + }, + { + label: 'gpt-4-32k-0613', + name: 'gpt-4-32k-0613' + }, + { + label: 'gpt-3.5-turbo', + name: 'gpt-3.5-turbo' + }, + { + label: 'gpt-3.5-turbo-1106', + name: 'gpt-3.5-turbo-1106' + }, + { + label: 'gpt-3.5-turbo-0613', + name: 'gpt-3.5-turbo-0613' + }, + { + label: 'gpt-3.5-turbo-16k', + name: 'gpt-3.5-turbo-16k' + }, + { + label: 'gpt-3.5-turbo-16k-0613', + name: 'gpt-3.5-turbo-16k-0613' + } + ], + default: 'gpt-3.5-turbo', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + + const obj: Partial = { + temperature: parseFloat(temperature), + model: modelName, + apiKey: openAIApiKey + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAI(obj) + return model + } +} + +module.exports = { nodeClass: ChatOpenAI_LlamaIndex_LLMs } diff --git a/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts b/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts index 3eba0ece..48ae85bc 100644 --- a/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts +++ b/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts @@ -126,7 +126,9 @@ class Cheerio_DocumentLoaders implements INode { let docs = [] if (relativeLinksMethod) { if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`) - if (!limit) limit = 10 + // if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined + // so when limit is 0 we can fetch all the links + if (limit === null || limit === undefined) limit = 10 else if (limit < 0) throw new Error('Limit cannot be less than 0') const pages: string[] = selectedLinks && selectedLinks.length > 0 @@ -143,7 +145,7 @@ class Cheerio_DocumentLoaders implements INode { } else if (selectedLinks && selectedLinks.length > 0) { if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`) - for (const page of selectedLinks) { + for (const page of selectedLinks.slice(0, limit)) { docs.push(...(await cheerioLoader(page))) } } else { diff --git a/packages/components/nodes/documentloaders/Folder/Folder.ts b/packages/components/nodes/documentloaders/Folder/Folder.ts index f5d0c640..fb3db8e8 100644 --- a/packages/components/nodes/documentloaders/Folder/Folder.ts +++ 
b/packages/components/nodes/documentloaders/Folder/Folder.ts @@ -34,6 +34,12 @@ class Folder_DocumentLoaders implements INode { type: 'string', placeholder: '' }, + { + label: 'Recursive', + name: 'recursive', + type: 'boolean', + additionalParams: false + }, { label: 'Text Splitter', name: 'textSplitter', @@ -54,48 +60,54 @@ class Folder_DocumentLoaders implements INode { const textSplitter = nodeData.inputs?.textSplitter as TextSplitter const folderPath = nodeData.inputs?.folderPath as string const metadata = nodeData.inputs?.metadata + const recursive = nodeData.inputs?.recursive as boolean - const loader = new DirectoryLoader(folderPath, { - '.json': (path) => new JSONLoader(path), - '.txt': (path) => new TextLoader(path), - '.csv': (path) => new CSVLoader(path), - '.docx': (path) => new DocxLoader(path), - // @ts-ignore - '.pdf': (path) => new PDFLoader(path, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }), - '.aspx': (path) => new TextLoader(path), - '.asp': (path) => new TextLoader(path), - '.cpp': (path) => new TextLoader(path), // C++ - '.c': (path) => new TextLoader(path), - '.cs': (path) => new TextLoader(path), - '.css': (path) => new TextLoader(path), - '.go': (path) => new TextLoader(path), // Go - '.h': (path) => new TextLoader(path), // C++ Header files - '.java': (path) => new TextLoader(path), // Java - '.js': (path) => new TextLoader(path), // JavaScript - '.less': (path) => new TextLoader(path), // Less files - '.ts': (path) => new TextLoader(path), // TypeScript - '.php': (path) => new TextLoader(path), // PHP - '.proto': (path) => new TextLoader(path), // Protocol Buffers - '.python': (path) => new TextLoader(path), // Python - '.py': (path) => new TextLoader(path), // Python - '.rst': (path) => new TextLoader(path), // reStructuredText - '.ruby': (path) => new TextLoader(path), // Ruby - '.rb': (path) => new TextLoader(path), // Ruby - '.rs': (path) => new TextLoader(path), // Rust - '.scala': (path) => new 
TextLoader(path), // Scala - '.sc': (path) => new TextLoader(path), // Scala - '.scss': (path) => new TextLoader(path), // Sass - '.sol': (path) => new TextLoader(path), // Solidity - '.sql': (path) => new TextLoader(path), //SQL - '.swift': (path) => new TextLoader(path), // Swift - '.markdown': (path) => new TextLoader(path), // Markdown - '.md': (path) => new TextLoader(path), // Markdown - '.tex': (path) => new TextLoader(path), // LaTeX - '.ltx': (path) => new TextLoader(path), // LaTeX - '.html': (path) => new TextLoader(path), // HTML - '.vb': (path) => new TextLoader(path), // Visual Basic - '.xml': (path) => new TextLoader(path) // XML - }) + const loader = new DirectoryLoader( + folderPath, + { + '.json': (path) => new JSONLoader(path), + '.txt': (path) => new TextLoader(path), + '.csv': (path) => new CSVLoader(path), + '.docx': (path) => new DocxLoader(path), + // @ts-ignore + '.pdf': (path) => new PDFLoader(path, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }), + '.aspx': (path) => new TextLoader(path), + '.asp': (path) => new TextLoader(path), + '.cpp': (path) => new TextLoader(path), // C++ + '.c': (path) => new TextLoader(path), + '.cs': (path) => new TextLoader(path), + '.css': (path) => new TextLoader(path), + '.go': (path) => new TextLoader(path), // Go + '.h': (path) => new TextLoader(path), // C++ Header files + '.kt': (path) => new TextLoader(path), // Kotlin + '.java': (path) => new TextLoader(path), // Java + '.js': (path) => new TextLoader(path), // JavaScript + '.less': (path) => new TextLoader(path), // Less files + '.ts': (path) => new TextLoader(path), // TypeScript + '.php': (path) => new TextLoader(path), // PHP + '.proto': (path) => new TextLoader(path), // Protocol Buffers + '.python': (path) => new TextLoader(path), // Python + '.py': (path) => new TextLoader(path), // Python + '.rst': (path) => new TextLoader(path), // reStructuredText + '.ruby': (path) => new TextLoader(path), // Ruby + '.rb': (path) => new 
TextLoader(path), // Ruby + '.rs': (path) => new TextLoader(path), // Rust + '.scala': (path) => new TextLoader(path), // Scala + '.sc': (path) => new TextLoader(path), // Scala + '.scss': (path) => new TextLoader(path), // Sass + '.sol': (path) => new TextLoader(path), // Solidity + '.sql': (path) => new TextLoader(path), //SQL + '.swift': (path) => new TextLoader(path), // Swift + '.markdown': (path) => new TextLoader(path), // Markdown + '.md': (path) => new TextLoader(path), // Markdown + '.tex': (path) => new TextLoader(path), // LaTeX + '.ltx': (path) => new TextLoader(path), // LaTeX + '.html': (path) => new TextLoader(path), // HTML + '.vb': (path) => new TextLoader(path), // Visual Basic + '.xml': (path) => new TextLoader(path) // XML + }, + recursive + ) let docs = [] if (textSplitter) { diff --git a/packages/components/nodes/documentloaders/PlainText/PlainText.ts b/packages/components/nodes/documentloaders/PlainText/PlainText.ts index c2adceeb..c0c697a3 100644 --- a/packages/components/nodes/documentloaders/PlainText/PlainText.ts +++ b/packages/components/nodes/documentloaders/PlainText/PlainText.ts @@ -51,11 +51,13 @@ class PlainText_DocumentLoaders implements INode { { label: 'Document', name: 'document', - baseClasses: this.baseClasses + description: 'Array of document objects containing metadata and pageContent', + baseClasses: [...this.baseClasses, 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/documentloaders/Playwright/Playwright.ts b/packages/components/nodes/documentloaders/Playwright/Playwright.ts index 2de166ce..55fa9608 100644 --- a/packages/components/nodes/documentloaders/Playwright/Playwright.ts +++ b/packages/components/nodes/documentloaders/Playwright/Playwright.ts @@ -167,7 +167,9 @@ class Playwright_DocumentLoaders implements INode { let docs = [] if (relativeLinksMethod) { if (process.env.DEBUG === 
'true') options.logger.info(`Start ${relativeLinksMethod}`) - if (!limit) limit = 10 + // if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined + // so when limit is 0 we can fetch all the links + if (limit === null || limit === undefined) limit = 10 else if (limit < 0) throw new Error('Limit cannot be less than 0') const pages: string[] = selectedLinks && selectedLinks.length > 0 @@ -184,7 +186,7 @@ class Playwright_DocumentLoaders implements INode { } else if (selectedLinks && selectedLinks.length > 0) { if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`) - for (const page of selectedLinks) { + for (const page of selectedLinks.slice(0, limit)) { docs.push(...(await playwrightLoader(page))) } } else { diff --git a/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts b/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts index 3d28f310..90b5a277 100644 --- a/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts +++ b/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts @@ -168,7 +168,9 @@ class Puppeteer_DocumentLoaders implements INode { let docs = [] if (relativeLinksMethod) { if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`) - if (!limit) limit = 10 + // if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined + // so when limit is 0 we can fetch all the links + if (limit === null || limit === undefined) limit = 10 else if (limit < 0) throw new Error('Limit cannot be less than 0') const pages: string[] = selectedLinks && selectedLinks.length > 0 @@ -185,7 +187,7 @@ class Puppeteer_DocumentLoaders implements INode { } else if (selectedLinks && selectedLinks.length > 0) { if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`) - for (const page of 
selectedLinks) { + for (const page of selectedLinks.slice(0, limit)) { docs.push(...(await puppeteerLoader(page))) } } else { diff --git a/packages/components/nodes/documentloaders/Text/Text.ts b/packages/components/nodes/documentloaders/Text/Text.ts index e41c5a9f..1eea709e 100644 --- a/packages/components/nodes/documentloaders/Text/Text.ts +++ b/packages/components/nodes/documentloaders/Text/Text.ts @@ -51,11 +51,13 @@ class Text_DocumentLoaders implements INode { { label: 'Document', name: 'document', - baseClasses: this.baseClasses + description: 'Array of document objects containing metadata and pageContent', + baseClasses: [...this.baseClasses, 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts b/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts index c087e000..27ef36f5 100644 --- a/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts +++ b/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts @@ -51,11 +51,13 @@ class VectorStoreToDocument_DocumentLoaders implements INode { { label: 'Document', name: 'document', + description: 'Array of document objects containing metadata and pageContent', baseClasses: [...this.baseClasses, 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts new file mode 100644 index 00000000..92f320be --- /dev/null +++ b/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts @@ -0,0 +1,77 @@ +import { 
ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAIEmbedding } from 'llamaindex' + +interface AzureOpenAIConfig { + apiKey?: string + endpoint?: string + apiVersion?: string + deploymentName?: string +} + +class AzureOpenAIEmbedding_LlamaIndex_Embeddings implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + credential: INodeParams + tags: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'Azure OpenAI Embeddings' + this.name = 'azureOpenAIEmbeddingsLlamaIndex' + this.version = 1.0 + this.type = 'AzureOpenAIEmbeddings' + this.icon = 'Azure.svg' + this.category = 'Embeddings' + this.description = 'Azure OpenAI API embeddings specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['azureOpenAIApi'] + } + this.inputs = [ + { + label: 'Timeout', + name: 'timeout', + type: 'number', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData) + const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData) + const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) + const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) + + const obj: Partial & { azure?: AzureOpenAIConfig } = { + azure: { + apiKey: azureOpenAIApiKey, + endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`, + apiVersion: azureOpenAIApiVersion, + deploymentName: azureOpenAIApiDeploymentName + } + } + + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAIEmbedding(obj) + return model + } +} + +module.exports = { nodeClass: AzureOpenAIEmbedding_LlamaIndex_Embeddings } diff --git a/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts new file mode 100644 index 00000000..960197fe --- /dev/null +++ b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts @@ -0,0 +1,91 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAIEmbedding } from 'llamaindex' + +class OpenAIEmbedding_LlamaIndex_Embeddings implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'OpenAI Embedding' + this.name = 'openAIEmbedding_LlamaIndex' + this.version = 1.0 + this.type = 'OpenAIEmbedding' + this.icon = 'openai.svg' + this.category = 'Embeddings' + this.description = 'OpenAI Embedding specific 
for LlamaIndex' + this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'text-embedding-3-large', + name: 'text-embedding-3-large' + }, + { + label: 'text-embedding-3-small', + name: 'text-embedding-3-small' + }, + { + label: 'text-embedding-ada-002', + name: 'text-embedding-ada-002' + } + ], + default: 'text-embedding-ada-002', + optional: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + optional: true, + additionalParams: true + }, + { + label: 'BasePath', + name: 'basepath', + type: 'string', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const timeout = nodeData.inputs?.timeout as string + const modelName = nodeData.inputs?.modelName as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + + const obj: Partial = { + apiKey: openAIApiKey, + model: modelName + } + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAIEmbedding(obj) + return model + } +} + +module.exports = { nodeClass: OpenAIEmbedding_LlamaIndex_Embeddings } diff --git a/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts b/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts new file mode 100644 index 00000000..262ceb7c --- /dev/null +++ b/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts @@ -0,0 +1,149 @@ +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { BaseNode, Metadata, BaseRetriever, LLM, ContextChatEngine, ChatMessage } from 'llamaindex' +import { reformatSourceDocuments } from '../EngineUtils' + +class ContextChatEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Context Chat Engine' + this.name = 'contextChatEngine' + this.version = 1.0 + this.type = 'ContextChatEngine' + this.icon = 'context-chat-engine.png' + this.category = 'Engine' + this.description = 'Answer question based on retrieved documents (context) with built-in memory to remember conversation' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Vector Store Retriever', + name: 'vectorStoreRetriever', + type: 'VectorIndexRetriever' + }, + { + label: 'Memory', + name: 'memory', + type: 'BaseChatMemory' + }, + { + label: 'Return Source Documents', + name: 
'returnSourceDocuments', + type: 'boolean', + optional: true + }, + { + label: 'System Message', + name: 'systemMessagePrompt', + type: 'string', + rows: 4, + optional: true, + placeholder: + 'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.' + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const model = nodeData.inputs?.model as LLM + const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever + const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const memory = nodeData.inputs?.memory as FlowiseMemory + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + + const chatHistory = [] as ChatMessage[] + + if (systemMessagePrompt) { + chatHistory.push({ + content: systemMessagePrompt, + role: 'user' + }) + } + + const chatEngine = new ContextChatEngine({ chatModel: model, retriever: vectorStoreRetriever }) + + const msgs = (await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[] + for (const message of msgs) { + if (message.type === 'apiMessage') { + chatHistory.push({ + content: message.message, + role: 'assistant' + }) + } else if (message.type === 'userMessage') { + chatHistory.push({ + content: message.message, + role: 'user' + }) + } + } + + let text = '' + let isStreamingStarted = false + let sourceDocuments: ICommonObject[] = [] + let sourceNodes: BaseNode[] = [] + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await chatEngine.chat({ message: input, chatHistory, stream: true }) + for await 
(const chunk of stream) { + text += chunk.response + if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + + if (returnSourceDocuments) { + sourceDocuments = reformatSourceDocuments(sourceNodes) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments) + } + } else { + const response = await chatEngine.chat({ message: input, chatHistory }) + text = response?.response + sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? []) + } + + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: text, + type: 'apiMessage' + } + ], + this.sessionId + ) + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } + } +} + +module.exports = { nodeClass: ContextChatEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts new file mode 100644 index 00000000..9bc9f3c0 --- /dev/null +++ b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts @@ -0,0 +1,124 @@ +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { LLM, ChatMessage, SimpleChatEngine } from 'llamaindex' + +class SimpleChatEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Simple Chat Engine' + this.name = 'simpleChatEngine' + this.version = 1.0 + this.type = 'SimpleChatEngine' + this.icon = 
'chat-engine.png' + this.category = 'Engine' + this.description = 'Simple engine to handle back and forth conversations' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Memory', + name: 'memory', + type: 'BaseChatMemory' + }, + { + label: 'System Message', + name: 'systemMessagePrompt', + type: 'string', + rows: 4, + optional: true, + placeholder: 'You are a helpful assistant' + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const model = nodeData.inputs?.model as LLM + const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const memory = nodeData.inputs?.memory as FlowiseMemory + + const chatHistory = [] as ChatMessage[] + + if (systemMessagePrompt) { + chatHistory.push({ + content: systemMessagePrompt, + role: 'user' + }) + } + + const chatEngine = new SimpleChatEngine({ llm: model }) + + const msgs = (await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[] + for (const message of msgs) { + if (message.type === 'apiMessage') { + chatHistory.push({ + content: message.message, + role: 'assistant' + }) + } else if (message.type === 'userMessage') { + chatHistory.push({ + content: message.message, + role: 'user' + }) + } + } + + let text = '' + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await chatEngine.chat({ message: input, chatHistory, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + } else { + 
const response = await chatEngine.chat({ message: input, chatHistory }) + text = response?.response + } + + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: text, + type: 'apiMessage' + } + ], + this.sessionId + ) + + return text + } +} + +module.exports = { nodeClass: SimpleChatEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/ChatEngine/chat-engine.png b/packages/components/nodes/engine/ChatEngine/chat-engine.png new file mode 100644 index 00000000..d614b888 Binary files /dev/null and b/packages/components/nodes/engine/ChatEngine/chat-engine.png differ diff --git a/packages/components/nodes/engine/ChatEngine/context-chat-engine.png b/packages/components/nodes/engine/ChatEngine/context-chat-engine.png new file mode 100644 index 00000000..ef4adc13 Binary files /dev/null and b/packages/components/nodes/engine/ChatEngine/context-chat-engine.png differ diff --git a/packages/components/nodes/engine/EngineUtils.ts b/packages/components/nodes/engine/EngineUtils.ts new file mode 100644 index 00000000..9424e789 --- /dev/null +++ b/packages/components/nodes/engine/EngineUtils.ts @@ -0,0 +1,12 @@ +import { BaseNode, Metadata } from 'llamaindex' + +export const reformatSourceDocuments = (sourceNodes: BaseNode[]) => { + const sourceDocuments = [] + for (const node of sourceNodes) { + sourceDocuments.push({ + pageContent: (node as any).text, + metadata: node.metadata + }) + } + return sourceDocuments +} diff --git a/packages/components/nodes/engine/QueryEngine/QueryEngine.ts b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts new file mode 100644 index 00000000..bd6e040d --- /dev/null +++ b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts @@ -0,0 +1,143 @@ +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { + RetrieverQueryEngine, + ResponseSynthesizer, + CompactAndRefine, + TreeSummarize, + Refine, + SimpleResponseBuilder, + BaseNode, 
+ Metadata +} from 'llamaindex' +import { reformatSourceDocuments } from '../EngineUtils' + +class QueryEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Query Engine' + this.name = 'queryEngine' + this.version = 1.0 + this.type = 'QueryEngine' + this.icon = 'query-engine.png' + this.category = 'Engine' + this.description = 'Simple query engine built to answer question over your data, without memory' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Vector Store Retriever', + name: 'vectorStoreRetriever', + type: 'VectorIndexRetriever' + }, + { + label: 'Response Synthesizer', + name: 'responseSynthesizer', + type: 'ResponseSynthesizer', + description: + 'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more', + optional: true + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever + const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + + let queryEngine = new RetrieverQueryEngine(vectorStoreRetriever) + + if (responseSynthesizerObj) { + if (responseSynthesizerObj.type === 'TreeSummarize') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate), + serviceContext: vectorStoreRetriever.serviceContext + }) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'CompactAndRefine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new CompactAndRefine( + vectorStoreRetriever.serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext: vectorStoreRetriever.serviceContext + }) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'Refine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new Refine( + vectorStoreRetriever.serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext: vectorStoreRetriever.serviceContext + }) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { + const responseSynthesizer = new 
ResponseSynthesizer({ + responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext), + serviceContext: vectorStoreRetriever.serviceContext + }) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } + } + + let text = '' + let sourceDocuments: ICommonObject[] = [] + let sourceNodes: BaseNode[] = [] + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await queryEngine.query({ query: input, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + + if (returnSourceDocuments) { + sourceDocuments = reformatSourceDocuments(sourceNodes) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments) + } + } else { + const response = await queryEngine.query({ query: input }) + text = response?.response + sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? 
[]) + } + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } + } +} + +module.exports = { nodeClass: QueryEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/QueryEngine/query-engine.png b/packages/components/nodes/engine/QueryEngine/query-engine.png new file mode 100644 index 00000000..68efdbe0 Binary files /dev/null and b/packages/components/nodes/engine/QueryEngine/query-engine.png differ diff --git a/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts b/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts new file mode 100644 index 00000000..a872c0a2 --- /dev/null +++ b/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts @@ -0,0 +1,193 @@ +import { flatten } from 'lodash' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { + TreeSummarize, + SimpleResponseBuilder, + Refine, + BaseEmbedding, + ResponseSynthesizer, + CompactAndRefine, + QueryEngineTool, + LLMQuestionGenerator, + SubQuestionQueryEngine, + BaseNode, + Metadata, + serviceContextFromDefaults +} from 'llamaindex' +import { reformatSourceDocuments } from '../EngineUtils' + +class SubQuestionQueryEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Sub Question Query Engine' + this.name = 'subQuestionQueryEngine' + this.version = 1.0 + this.type = 'SubQuestionQueryEngine' + this.icon = 'subQueryEngine.svg' + this.category = 'Engine' + this.description = + 'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response' + this.baseClasses = 
[this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'QueryEngine Tools', + name: 'queryEngineTools', + type: 'QueryEngineTool', + list: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Response Synthesizer', + name: 'responseSynthesizer', + type: 'ResponseSynthesizer', + description: + 'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See more', + optional: true + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + + const serviceContext = serviceContextFromDefaults({ + llm: model, + embedModel: embeddings + }) + + let queryEngineTools = nodeData.inputs?.queryEngineTools as QueryEngineTool[] + queryEngineTools = flatten(queryEngineTools) + + let queryEngine = SubQuestionQueryEngine.fromDefaults({ + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + + const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + if (responseSynthesizerObj) { + if (responseSynthesizerObj.type === 'TreeSummarize') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new TreeSummarize(serviceContext, responseSynthesizerObj.textQAPromptTemplate), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if 
(responseSynthesizerObj.type === 'CompactAndRefine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new CompactAndRefine( + serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'Refine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new Refine( + serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new SimpleResponseBuilder(serviceContext), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } + } + + let text = '' + let sourceDocuments: ICommonObject[] = [] + let sourceNodes: BaseNode[] = [] + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await queryEngine.query({ query: input, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + + if 
(returnSourceDocuments) { + sourceDocuments = reformatSourceDocuments(sourceNodes) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments) + } + } else { + const response = await queryEngine.query({ query: input }) + text = response?.response + sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? []) + } + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } + } +} + +module.exports = { nodeClass: SubQuestionQueryEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg b/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg new file mode 100644 index 00000000..b94c20b5 --- /dev/null +++ b/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts index 91c1d369..22da396e 100644 --- a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts +++ b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts @@ -117,7 +117,10 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P memoryKey: memoryKey ?? 
'chat_history', chatHistory: dynamoDb, sessionId, - dynamodbClient: client + dynamodbClient: client, + tableName, + partitionKey, + dynamoKey: { [partitionKey]: { S: sessionId } } }) return memory } @@ -125,6 +128,9 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P interface BufferMemoryExtendedInput { dynamodbClient: DynamoDBClient sessionId: string + tableName: string + partitionKey: string + dynamoKey: Record } interface DynamoDBSerializedChatMessage { @@ -142,6 +148,10 @@ interface DynamoDBSerializedChatMessage { } class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { + private tableName = '' + private partitionKey = '' + private dynamoKey: Record + private messageAttributeName: string sessionId = '' dynamodbClient: DynamoDBClient @@ -149,11 +159,14 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { super(fields) this.sessionId = fields.sessionId this.dynamodbClient = fields.dynamodbClient + this.tableName = fields.tableName + this.partitionKey = fields.partitionKey + this.dynamoKey = fields.dynamoKey } overrideDynamoKey(overrideSessionId = '') { - const existingDynamoKey = (this as any).dynamoKey - const partitionKey = (this as any).partitionKey + const existingDynamoKey = this.dynamoKey + const partitionKey = this.partitionKey let newDynamoKey: Record = {} @@ -209,9 +222,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if (!this.dynamodbClient) return [] - const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey - const tableName = (this as any).tableName - const messageAttributeName = (this as any).messageAttributeName + const dynamoKey = overrideSessionId ? 
this.overrideDynamoKey(overrideSessionId) : this.dynamoKey + const tableName = this.tableName + const messageAttributeName = this.messageAttributeName const params: GetItemCommandInput = { TableName: tableName, @@ -236,9 +249,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { if (!this.dynamodbClient) return - const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey - const tableName = (this as any).tableName - const messageAttributeName = (this as any).messageAttributeName + const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey + const tableName = this.tableName + const messageAttributeName = this.messageAttributeName const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') @@ -259,8 +272,8 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async clearChatMessages(overrideSessionId = ''): Promise { if (!this.dynamodbClient) return - const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey - const tableName = (this as any).tableName + const dynamoKey = overrideSessionId ? 
this.overrideDynamoKey(overrideSessionId) : this.dynamoKey + const tableName = this.tableName const params: DeleteItemCommandInput = { TableName: tableName, diff --git a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts index b7309dcd..e2ee9f44 100644 --- a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts +++ b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts @@ -154,7 +154,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if (!this.collection) return [] - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const document = await this.collection.findOne({ sessionId: id }) const messages = document?.messages || [] const baseMessages = messages.map(mapStoredMessageToChatMessage) @@ -164,7 +164,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { if (!this.collection) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') @@ -196,7 +196,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async clearChatMessages(overrideSessionId = ''): Promise { if (!this.collection) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? 
overrideSessionId : this.sessionId await this.collection.deleteOne({ sessionId: id }) await this.clear() } diff --git a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts index 19506fc1..0b8f3800 100644 --- a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts +++ b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts @@ -141,7 +141,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods { } async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId try { const resp = await this.caller.call(fetch, `${this.url}/sessions/${id}/memory`, { //@ts-ignore @@ -172,7 +172,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods { } async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') const inputValues = { [this.inputKey ?? 'input']: input?.text } @@ -182,7 +182,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods { } async clearChatMessages(overrideSessionId = ''): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? 
overrideSessionId : this.sessionId await this.clear(id) } } diff --git a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts index c54e07b5..965b6760 100644 --- a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts @@ -189,7 +189,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if (!this.redisClient) return [] - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const rawStoredMessages = await this.redisClient.lrange(id, this.windowSize ? this.windowSize * -1 : 0, -1) const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message)) const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage) @@ -199,7 +199,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { if (!this.redisClient) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') @@ -219,7 +219,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async clearChatMessages(overrideSessionId = ''): Promise { if (!this.redisClient) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? 
overrideSessionId : this.sessionId await this.redisClient.del(id) await this.clear() } diff --git a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts index 3d7f6dbf..98a704ab 100644 --- a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts @@ -114,7 +114,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if (!this.redisClient) return [] - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const rawStoredMessages: StoredMessage[] = await this.redisClient.lrange(id, 0, -1) const orderedMessages = rawStoredMessages.reverse() const previousMessages = orderedMessages.filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined) @@ -125,7 +125,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { if (!this.redisClient) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') @@ -145,7 +145,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async clearChatMessages(overrideSessionId = ''): Promise { if (!this.redisClient) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? 
overrideSessionId : this.sessionId await this.redisClient.del(id) await this.clear() } diff --git a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts index 597eee8a..360a76d4 100644 --- a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts +++ b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts @@ -163,14 +163,14 @@ class ZepMemoryExtended extends ZepMemory implements MemoryMethods { } async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const memoryVariables = await this.loadMemoryVariables({}, id) const baseMessages = memoryVariables[this.memoryKey] return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages) } async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') const inputValues = { [this.inputKey ?? 'input']: input?.text } @@ -180,7 +180,7 @@ class ZepMemoryExtended extends ZepMemory implements MemoryMethods { } async clearChatMessages(overrideSessionId = ''): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? 
overrideSessionId : this.sessionId await this.clear(id) } } diff --git a/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts new file mode 100644 index 00000000..e7fe8ea7 --- /dev/null +++ b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts @@ -0,0 +1,79 @@ +import { getBaseClasses, INode, INodeData, INodeParams } from '../../../src' +import { BaseOutputParser } from 'langchain/schema/output_parser' +import { StructuredOutputParser as LangchainStructuredOutputParser } from 'langchain/output_parsers' +import { CATEGORY } from '../OutputParserHelpers' +import { z } from 'zod' + +class AdvancedStructuredOutputParser implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'Advanced Structured Output Parser' + this.name = 'advancedStructuredOutputParser' + this.version = 1.0 + this.type = 'AdvancedStructuredOutputParser' + this.description = 'Parse the output of an LLM call into a given structure by providing a Zod schema.' + this.icon = 'structure.svg' + this.category = CATEGORY + this.baseClasses = [this.type, ...getBaseClasses(BaseOutputParser)] + this.inputs = [ + { + label: 'Autofix', + name: 'autofixParser', + type: 'boolean', + optional: true, + description: 'In the event that the first call fails, will make another call to the model to fix any errors.' 
+ }, + { + label: 'Example JSON', + name: 'exampleJson', + type: 'string', + description: 'Zod schema for the output of the model', + rows: 10, + default: `z.object({ + title: z.string(), // Title of the movie as a string + yearOfRelease: z.number().int(), // Release year as an integer number, + genres: z.enum([ + "Action", "Comedy", "Drama", "Fantasy", "Horror", + "Mystery", "Romance", "Science Fiction", "Thriller", "Documentary" + ]).array().max(2), // Array of genres, max of 2 from the defined enum + shortDescription: z.string().max(500) // Short description, max 500 characters +})` + } + ] + } + + async init(nodeData: INodeData): Promise { + const schemaString = nodeData.inputs?.exampleJson as string + const autoFix = nodeData.inputs?.autofixParser as boolean + + const zodSchemaFunction = new Function('z', `return ${schemaString}`) + const zodSchema = zodSchemaFunction(z) + + try { + const structuredOutputParser = LangchainStructuredOutputParser.fromZodSchema(zodSchema) + + // NOTE: When we change Flowise to return a json response, the following has to be changed to: JsonStructuredOutputParser + Object.defineProperty(structuredOutputParser, 'autoFix', { + enumerable: true, + configurable: true, + writable: true, + value: autoFix + }) + return structuredOutputParser + } catch (exception) { + throw new Error('Error parsing Zod Schema: ' + exception) + } + } +} + +module.exports = { nodeClass: AdvancedStructuredOutputParser } diff --git a/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/structure.svg b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/structure.svg new file mode 100644 index 00000000..3875982d --- /dev/null +++ b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/structure.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts b/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts new file 
mode 100644 index 00000000..db998e1f --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts @@ -0,0 +1,75 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class CompactRefine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Compact and Refine' + this.name = 'compactrefineLlamaIndex' + this.version = 1.0 + this.type = 'CompactRefine' + this.icon = 'compactrefine.svg' + this.category = 'Response Synthesizer' + this.description = + 'CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Refine Prompt', + name: 'refinePrompt', + type: 'string', + rows: 4, + default: `The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer:`, + warning: `Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`, + optional: true + }, + { + label: 'Text QA Prompt', + name: 'textQAPrompt', + type: 'string', + rows: 4, + default: `Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. 
+Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const refinePrompt = nodeData.inputs?.refinePrompt as string + const textQAPrompt = nodeData.inputs?.textQAPrompt as string + + const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) => + refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query) + const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'CompactAndRefine' }) + } +} + +module.exports = { nodeClass: CompactRefine_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg b/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg new file mode 100644 index 00000000..9ea95529 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/Refine/Refine.ts b/packages/components/nodes/responsesynthesizer/Refine/Refine.ts new file mode 100644 index 00000000..267bc208 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/Refine/Refine.ts @@ -0,0 +1,75 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class Refine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Refine' + this.name = 'refineLlamaIndex' + this.version 
= 1.0 + this.type = 'Refine' + this.icon = 'refine.svg' + this.category = 'Response Synthesizer' + this.description = + 'Create and refine an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Refine Prompt', + name: 'refinePrompt', + type: 'string', + rows: 4, + default: `The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer:`, + warning: `Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`, + optional: true + }, + { + label: 'Text QA Prompt', + name: 'textQAPrompt', + type: 'string', + rows: 4, + default: `Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. 
Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const refinePrompt = nodeData.inputs?.refinePrompt as string + const textQAPrompt = nodeData.inputs?.textQAPrompt as string + + const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) => + refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query) + const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'Refine' }) + } +} + +module.exports = { nodeClass: Refine_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/Refine/refine.svg b/packages/components/nodes/responsesynthesizer/Refine/refine.svg new file mode 100644 index 00000000..1170c584 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/Refine/refine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts new file mode 100644 index 00000000..cb880020 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts @@ -0,0 +1,35 @@ +import { INode, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class SimpleResponseBuilder_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Simple Response Builder' + this.name = 'simpleResponseBuilderLlamaIndex' + this.version = 1.0 + this.type = 
'SimpleResponseBuilder' + this.icon = 'simplerb.svg' + this.category = 'Response Synthesizer' + this.description = `Apply a query to a collection of text chunks, gathering the responses in an array, and return a combined string of all responses. Useful for individual queries on each text chunk.` + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [] + } + + async init(): Promise { + return new ResponseSynthesizerClass({ type: 'SimpleResponseBuilder' }) + } +} + +module.exports = { nodeClass: SimpleResponseBuilder_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg new file mode 100644 index 00000000..6f04fdc9 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts b/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts new file mode 100644 index 00000000..44872786 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts @@ -0,0 +1,56 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class TreeSummarize_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'TreeSummarize' + this.name = 'treeSummarizeLlamaIndex' + this.version = 1.0 + this.type = 'TreeSummarize' + this.icon = 'treesummarize.svg' + this.category = 'Response Synthesizer' + this.description = + 'Given a set of text chunks and the query, recursively construct a 
tree and return the root node as the response. Good for summarization purposes.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Prompt', + name: 'prompt', + type: 'string', + rows: 4, + default: `Context information from multiple sources is below. +--------------------- +{context} +--------------------- +Given the information from multiple sources and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const prompt = nodeData.inputs?.prompt as string + + const textQAPromptTemplate = ({ context = '', query = '' }) => prompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, type: 'TreeSummarize' }) + } +} + +module.exports = { nodeClass: TreeSummarize_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg b/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg new file mode 100644 index 00000000..f81a3a53 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/base.ts b/packages/components/nodes/responsesynthesizer/base.ts new file mode 100644 index 00000000..68fd7f1a --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/base.ts @@ -0,0 +1,11 @@ +export class ResponseSynthesizerClass { + type: string + textQAPromptTemplate?: any + refinePromptTemplate?: any + + constructor(params: { type: string; textQAPromptTemplate?: any; refinePromptTemplate?: any }) { + this.type = params.type + this.textQAPromptTemplate = params.textQAPromptTemplate + this.refinePromptTemplate = params.refinePromptTemplate + } +} diff 
--git a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts index 442fdc7a..5e92505e 100644 --- a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts +++ b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts @@ -94,11 +94,13 @@ class CohereRerankRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts b/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts index d1049fa4..16d40790 100644 --- a/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts +++ b/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts @@ -78,11 +78,13 @@ class EmbeddingsFilterRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts index 10fff764..a7cd9829 100644 --- a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts +++ b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts @@ -140,11 +140,13 @@ Passage:` { label: 
'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts b/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts index 6b710cf3..9bace712 100644 --- a/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts +++ b/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts @@ -58,11 +58,13 @@ class LLMFilterCompressionRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts b/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts index ed15ed24..9788f095 100644 --- a/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts +++ b/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts @@ -89,11 +89,13 @@ class RRFRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts 
b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts index 5f5a9ed0..6a6976a5 100644 --- a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts +++ b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts @@ -74,11 +74,13 @@ class SimilarityThresholdRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts b/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts new file mode 100644 index 00000000..163eff76 --- /dev/null +++ b/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts @@ -0,0 +1,68 @@ +import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { VectorStoreIndex } from 'llamaindex' + +class QueryEngine_Tools implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + tags: string[] + baseClasses: string[] + inputs?: INodeParams[] + + constructor() { + this.label = 'QueryEngine Tool' + this.name = 'queryEngineToolLlamaIndex' + this.version = 1.0 + this.type = 'QueryEngineTool' + this.icon = 'queryEngineTool.svg' + this.category = 'Tools' + this.tags = ['LlamaIndex'] + this.description = 'Tool used to invoke query engine' + this.baseClasses = [this.type] + this.inputs = [ + { + label: 'Vector Store Index', + name: 'vectorStoreIndex', + type: 'VectorStoreIndex' + }, + { + label: 'Tool Name', + name: 'toolName', + type: 'string', + description: 'Tool name must be small capital letter with underscore. 
Ex: my_tool' + }, + { + label: 'Tool Description', + name: 'toolDesc', + type: 'string', + rows: 4 + } + ] + } + + async init(nodeData: INodeData): Promise { + const vectorStoreIndex = nodeData.inputs?.vectorStoreIndex as VectorStoreIndex + const toolName = nodeData.inputs?.toolName as string + const toolDesc = nodeData.inputs?.toolDesc as string + const queryEngineTool = { + queryEngine: vectorStoreIndex.asQueryEngine({ + preFilters: { + ...(vectorStoreIndex as any).metadatafilter + } + }), + metadata: { + name: toolName, + description: toolDesc + }, + vectorStoreIndex + } + + return queryEngineTool + } +} + +module.exports = { nodeClass: QueryEngine_Tools } diff --git a/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg b/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg new file mode 100644 index 00000000..d49d8375 --- /dev/null +++ b/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts new file mode 100644 index 00000000..c0b2e5c1 --- /dev/null +++ b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts @@ -0,0 +1,383 @@ +import { + BaseNode, + Document, + Metadata, + VectorStore, + VectorStoreQuery, + VectorStoreQueryResult, + serviceContextFromDefaults, + storageContextFromDefaults, + VectorStoreIndex, + BaseEmbedding +} from 'llamaindex' +import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone' +import { flatten } from 'lodash' +import { Document as LCDocument } from 'langchain/document' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils' + +class PineconeLlamaIndex_VectorStores implements INode { + label: 
string + name: string + version: number + description: string + type: string + icon: string + category: string + tags: string[] + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Pinecone' + this.name = 'pineconeLlamaIndex' + this.version = 1.0 + this.type = 'Pinecone' + this.icon = 'pinecone.svg' + this.category = 'Vector Stores' + this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database` + this.baseClasses = [this.type, 'VectorIndexRetriever'] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['pineconeApi'] + } + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Pinecone Index', + name: 'pineconeIndex', + type: 'string' + }, + { + label: 'Pinecone Namespace', + name: 'pineconeNamespace', + type: 'string', + placeholder: 'my-first-namespace', + additionalParams: true, + optional: true + }, + { + label: 'Pinecone Metadata Filter', + name: 'pineconeMetadataFilter', + type: 'json', + optional: true, + additionalParams: true + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. 
Default to 4', + placeholder: '4', + type: 'number', + additionalParams: true, + optional: true + } + ] + this.outputs = [ + { + label: 'Pinecone Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Pinecone Vector Store Index', + name: 'vectorStore', + baseClasses: [this.type, 'VectorStoreIndex'] + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData, options: ICommonObject): Promise { + const indexName = nodeData.inputs?.pineconeIndex as string + const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string + const docs = nodeData.inputs?.document as LCDocument[] + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData) + + const pcvs = new PineconeVectorStore({ + indexName, + apiKey: pineconeApiKey, + namespace: pineconeNamespace + }) + + const flattenDocs = docs && docs.length ? 
flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + if (flattenDocs[i] && flattenDocs[i].pageContent) { + finalDocs.push(new LCDocument(flattenDocs[i])) + } + } + + const llamadocs: Document[] = [] + for (const doc of finalDocs) { + llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata })) + } + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ vectorStore: pcvs }) + + try { + await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext }) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const indexName = nodeData.inputs?.pineconeIndex as string + const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string + const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + const topK = nodeData.inputs?.topK as string + const k = topK ? parseFloat(topK) : 4 + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData) + + const obj: PineconeParams = { + indexName, + apiKey: pineconeApiKey + } + + if (pineconeNamespace) obj.namespace = pineconeNamespace + + let metadatafilter = {} + if (pineconeMetadataFilter) { + metadatafilter = typeof pineconeMetadataFilter === 'object' ? 
pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter) + obj.queryFilter = metadatafilter + } + + const pcvs = new PineconeVectorStore(obj) + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ vectorStore: pcvs }) + + const index = await VectorStoreIndex.init({ + nodes: [], + storageContext, + serviceContext + }) + + const output = nodeData.outputs?.output as string + + if (output === 'retriever') { + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + return retriever + } else if (output === 'vectorStore') { + ;(index as any).k = k + if (metadatafilter) { + ;(index as any).metadatafilter = metadatafilter + } + return index + } + return index + } +} + +type PineconeParams = { + indexName: string + apiKey: string + namespace?: string + chunkSize?: number + queryFilter?: object +} + +class PineconeVectorStore implements VectorStore { + storesText: boolean = true + db?: Pinecone + indexName: string + apiKey: string + chunkSize: number + namespace?: string + queryFilter?: object + + constructor(params: PineconeParams) { + this.indexName = params?.indexName + this.apiKey = params?.apiKey + this.namespace = params?.namespace ?? '' + this.chunkSize = params?.chunkSize ?? Number.parseInt(process.env.PINECONE_CHUNK_SIZE ?? '100') + this.queryFilter = params?.queryFilter ?? 
{} + } + + private async getDb(): Promise { + if (!this.db) { + this.db = new Pinecone({ + apiKey: this.apiKey + }) + } + return Promise.resolve(this.db) + } + + client() { + return this.getDb() + } + + async index() { + const db: Pinecone = await this.getDb() + return db.Index(this.indexName) + } + + async clearIndex() { + const db: Pinecone = await this.getDb() + return await db.index(this.indexName).deleteAll() + } + + async add(embeddingResults: BaseNode[]): Promise { + if (embeddingResults.length == 0) { + return Promise.resolve([]) + } + + const idx: Index = await this.index() + const nodes = embeddingResults.map(this.nodeToRecord) + + for (let i = 0; i < nodes.length; i += this.chunkSize) { + const chunk = nodes.slice(i, i + this.chunkSize) + const result = await this.saveChunk(idx, chunk) + if (!result) { + return Promise.reject() + } + } + return Promise.resolve([]) + } + + protected async saveChunk(idx: Index, chunk: any) { + try { + const namespace = idx.namespace(this.namespace ?? '') + await namespace.upsert(chunk) + return true + } catch (err) { + return false + } + } + + async delete(refDocId: string): Promise { + const idx = await this.index() + const namespace = idx.namespace(this.namespace ?? '') + return namespace.deleteOne(refDocId) + } + + async query(query: VectorStoreQuery): Promise { + const queryOptions: any = { + vector: query.queryEmbedding, + topK: query.similarityTopK, + filter: this.queryFilter + } + + const idx = await this.index() + const namespace = idx.namespace(this.namespace ?? 
'') + const results = await namespace.query(queryOptions) + + const idList = results.matches.map((row) => row.id) + const records: FetchResponse = await namespace.fetch(idList) + const rows = Object.values(records.records) + + const nodes = rows.map((row) => { + return new Document({ + id_: row.id, + text: this.textFromResultRow(row), + metadata: this.metaWithoutText(row.metadata), + embedding: row.values + }) + }) + + const result = { + nodes: nodes, + similarities: results.matches.map((row) => row.score || 999), + ids: results.matches.map((row) => row.id) + } + + return Promise.resolve(result) + } + + /** + * Required by VectorStore interface. Currently ignored. + */ + persist(): Promise { + return Promise.resolve() + } + + textFromResultRow(row: ScoredPineconeRecord): string { + return row.metadata?.text ?? '' + } + + metaWithoutText(meta: Metadata): any { + return Object.keys(meta) + .filter((key) => key != 'text') + .reduce((acc: any, key: string) => { + acc[key] = meta[key] + return acc + }, {}) + } + + nodeToRecord(node: BaseNode) { + let id: any = node.id_.length ? 
node.id_ : null + return { + id: id, + values: node.getEmbedding(), + metadata: { + ...cleanupMetadata(node.metadata), + text: (node as any).text + } + } + } +} + +const cleanupMetadata = (nodeMetadata: ICommonObject) => { + // Pinecone doesn't support nested objects, so we flatten them + const documentMetadata: any = { ...nodeMetadata } + // preserve string arrays which are allowed + const stringArrays: Record = {} + for (const key of Object.keys(documentMetadata)) { + if (Array.isArray(documentMetadata[key]) && documentMetadata[key].every((el: any) => typeof el === 'string')) { + stringArrays[key] = documentMetadata[key] + delete documentMetadata[key] + } + } + const metadata: { + [key: string]: string | number | boolean | string[] | null + } = { + ...flattenObject(documentMetadata), + ...stringArrays + } + // Pinecone doesn't support null values, so we remove them + for (const key of Object.keys(metadata)) { + if (metadata[key] == null) { + delete metadata[key] + } else if (typeof metadata[key] === 'object' && Object.keys(metadata[key] as unknown as object).length === 0) { + delete metadata[key] + } + } + return metadata +} + +module.exports = { nodeClass: PineconeLlamaIndex_VectorStores } diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index 4e8bae32..be7784cc 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -24,7 +24,7 @@ class Postgres_VectorStores implements INode { constructor() { this.label = 'Postgres' this.name = 'postgres' - this.version = 2.0 + this.version = 3.0 this.type = 'Postgres' this.icon = 'postgres.svg' this.category = 'Vector Stores' @@ -60,13 +60,6 @@ class Postgres_VectorStores implements INode { name: 'database', type: 'string' }, - { - label: 'SSL Connection', - name: 'sslConnection', - type: 'boolean', - default: false, - optional: false - }, { label: 'Port', 
name: 'port', @@ -124,7 +117,6 @@ class Postgres_VectorStores implements INode { const docs = nodeData.inputs?.document as Document[] const embeddings = nodeData.inputs?.embeddings as Embeddings const additionalConfig = nodeData.inputs?.additionalConfig as string - const sslConnection = nodeData.inputs?.sslConnection as boolean let additionalConfiguration = {} if (additionalConfig) { @@ -142,8 +134,7 @@ class Postgres_VectorStores implements INode { port: nodeData.inputs?.port as number, username: user, password: password, - database: nodeData.inputs?.database as string, - ssl: sslConnection + database: nodeData.inputs?.database as string } const args = { @@ -198,7 +189,8 @@ class Postgres_VectorStores implements INode { type: 'postgres', host: nodeData.inputs?.host as string, port: nodeData.inputs?.port as number, - username: user, + username: user, // Required by TypeORMVectorStore + user: user, // Required by Pool in similaritySearchVectorWithScore password: password, database: nodeData.inputs?.database as string } @@ -248,14 +240,7 @@ const similaritySearchVectorWithScore = async ( ORDER BY "_distance" ASC LIMIT $3;` - const poolOptions = { - host: postgresConnectionOptions.host, - port: postgresConnectionOptions.port, - user: postgresConnectionOptions.username, - password: postgresConnectionOptions.password, - database: postgresConnectionOptions.database - } - const pool = new Pool(poolOptions) + const pool = new Pool(postgresConnectionOptions) const conn = await pool.connect() const documents = await conn.query(queryString, [embeddingString, _filter, k]) diff --git a/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts new file mode 100644 index 00000000..36c383e9 --- /dev/null +++ b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts @@ -0,0 +1,145 @@ +import path from 'path' +import { flatten } from 'lodash' +import { storageContextFromDefaults, 
serviceContextFromDefaults, VectorStoreIndex, Document } from 'llamaindex' +import { Document as LCDocument } from 'langchain/document' +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { getUserHome } from '../../../src' + +class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'SimpleStore' + this.name = 'simpleStoreLlamaIndex' + this.version = 1.0 + this.type = 'SimpleVectorStore' + this.icon = 'simplevs.svg' + this.category = 'Vector Stores' + this.description = 'Upsert embedded data to local path and perform similarity search' + this.baseClasses = [this.type, 'VectorIndexRetriever'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Base Path to store', + name: 'basePath', + description: + 'Path to store persist embeddings indexes with persistence. If not specified, default to same path where database is stored', + type: 'string', + optional: true + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. 
Default to 4', + placeholder: '4', + type: 'number', + optional: true + } + ] + this.outputs = [ + { + label: 'SimpleStore Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'SimpleStore Vector Store Index', + name: 'vectorStore', + baseClasses: [this.type, 'VectorStoreIndex'] + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData): Promise { + const basePath = nodeData.inputs?.basePath as string + const docs = nodeData.inputs?.document as LCDocument[] + const embeddings = nodeData.inputs?.embeddings + const model = nodeData.inputs?.model + + let filePath = '' + if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex') + else filePath = basePath + + const flattenDocs = docs && docs.length ? flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + finalDocs.push(new LCDocument(flattenDocs[i])) + } + + const llamadocs: Document[] = [] + for (const doc of finalDocs) { + llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata })) + } + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ persistDir: filePath }) + + try { + await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext }) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData): Promise { + const basePath = nodeData.inputs?.basePath as string + const embeddings = nodeData.inputs?.embeddings + const model = nodeData.inputs?.model + const topK = nodeData.inputs?.topK as string + const k = topK ? 
parseFloat(topK) : 4 + + let filePath = '' + if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex') + else filePath = basePath + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ persistDir: filePath }) + + const index = await VectorStoreIndex.init({ storageContext, serviceContext }) + + const output = nodeData.outputs?.output as string + + if (output === 'retriever') { + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + return retriever + } else if (output === 'vectorStore') { + ;(index as any).k = k + return index + } + return index + } +} + +module.exports = { nodeClass: SimpleStoreUpsert_LlamaIndex_VectorStores } diff --git a/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg b/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg new file mode 100644 index 00000000..52c74432 --- /dev/null +++ b/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/packages/components/package.json b/packages/components/package.json index 13423e2b..4e4a5757 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -1,6 +1,6 @@ { "name": "flowise-components", - "version": "1.5.3", + "version": "1.6.0", "description": "Flowiseai Components", "main": "dist/src/index", "types": "dist/src/index.d.ts", @@ -62,6 +62,7 @@ "langfuse-langchain": "2.3.3", "langsmith": "0.0.53", "linkifyjs": "^4.1.1", + "llamaindex": "^0.0.48", "llmonitor": "^0.5.5", "mammoth": "^1.5.1", "moment": "^2.29.3", diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index 2081b154..44818733 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -98,6 +98,7 @@ export interface INodeProperties { version: number 
category: string // TODO: use enum instead of string baseClasses: string[] + tags?: string[] description?: string filePath?: string badge?: string diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index 0f9529a0..68b9f850 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -662,6 +662,28 @@ export const convertSchemaToZod = (schema: string | object): ICommonObject => { } } +/** + * Flatten nested object + * @param {ICommonObject} obj + * @param {string} parentKey + * @returns {ICommonObject} + */ +export const flattenObject = (obj: ICommonObject, parentKey?: string) => { + let result: any = {} + + Object.keys(obj).forEach((key) => { + const value = obj[key] + const _key = parentKey ? parentKey + '.' + key : key + if (typeof value === 'object') { + result = { ...result, ...flattenObject(value, _key) } + } else { + result[_key] = value + } + }) + + return result +} + /** * Convert BaseMessage to IMessage * @param {BaseMessage[]} messages diff --git a/packages/server/.env.example b/packages/server/.env.example index e2eb833f..5f22cafd 100644 --- a/packages/server/.env.example +++ b/packages/server/.env.example @@ -21,6 +21,7 @@ PORT=3000 # FLOWISE_USERNAME=user # FLOWISE_PASSWORD=1234 # FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey +# FLOWISE_FILE_SIZE_LIMIT=50mb # DEBUG=true # LOG_LEVEL=debug (error | warn | info | verbose | debug) # TOOL_FUNCTION_BUILTIN_DEP=crypto,fs diff --git a/packages/server/marketplaces/chatflows/API Agent OpenAI.json b/packages/server/marketplaces/chatflows/API Agent OpenAI.json index 87f6d6d2..621529fc 100644 --- a/packages/server/marketplaces/chatflows/API Agent OpenAI.json +++ b/packages/server/marketplaces/chatflows/API Agent OpenAI.json @@ -1,5 +1,7 @@ { "description": "Use OpenAI Function Agent and Chain to automatically decide which API to call, generating url and body request from conversation", + "categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,OpenAI 
Function Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json index af99be9d..9d5a6c54 100644 --- a/packages/server/marketplaces/chatflows/API Agent.json +++ b/packages/server/marketplaces/chatflows/API Agent.json @@ -1,5 +1,7 @@ { "description": "Given API docs, agent automatically decide which API to call, generating url and body request from conversation", + "categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,Conversational Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json new file mode 100644 index 00000000..3fd71988 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json @@ -0,0 +1,464 @@ +{ + "description": "Return response as a JSON structure as specified by a Zod schema", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 508, + "id": "llmChain_0", + "position": { + "x": 1229.1699649849293, + "y": 245.55173505632646 + }, + "type": "customNode", + "data": { + "id": "llmChain_0", + "label": "LLM Chain", + "version": 3, + "name": "llmChain", + "type": "LLMChain", + "baseClasses": ["LLMChain", "BaseChain", "Runnable"], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": "chainName", + "type": "string", + "placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_0-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_0-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": 
"llmChain_0-input-prompt-BasePromptTemplate" + }, + { + "label": "Output Parser", + "name": "outputParser", + "type": "BaseLLMOutputParser", + "optional": true, + "id": "llmChain_0-input-outputParser-BaseLLMOutputParser" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "llmChain_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_0.data.instance}}", + "prompt": "{{chatPromptTemplate_0.data.instance}}", + "outputParser": "{{advancedStructuredOutputParser_0.data.instance}}", + "chainName": "", + "inputModeration": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | Runnable" + }, + { + "id": "llmChain_0-output-outputPrediction-string|json", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string | json" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": "llmChain" + }, + "selected": false + }, + "positionAbsolute": { + "x": 1229.1699649849293, + "y": 245.55173505632646 + }, + "selected": false + }, + { + "width": 300, + "height": 690, + "id": "chatPromptTemplate_0", + "position": { + "x": 493.26582927222483, + "y": -156.20470841335592 + }, + "type": "customNode", + "data": { + "id": "chatPromptTemplate_0", + "label": "Chat Prompt Template", + "version": 1, + "name": "chatPromptTemplate", + "type": "ChatPromptTemplate", + "baseClasses": ["ChatPromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "Runnable"], + "category": "Prompts", + "description": "Schema to represent a chat prompt", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessagePrompt", + 
"type": "string", + "rows": 4, + "placeholder": "You are a helpful assistant that translates {input_language} to {output_language}.", + "id": "chatPromptTemplate_0-input-systemMessagePrompt-string" + }, + { + "label": "Human Message", + "name": "humanMessagePrompt", + "type": "string", + "rows": 4, + "placeholder": "{text}", + "id": "chatPromptTemplate_0-input-humanMessagePrompt-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "chatPromptTemplate_0-input-promptValues-json" + } + ], + "inputAnchors": [], + "inputs": { + "systemMessagePrompt": "This AI is designed to only output information in JSON format without exception. This AI can only output JSON and will never output any other text.\n\nWhen asked to correct itself, this AI will only output the corrected JSON and never any other text.", + "humanMessagePrompt": "{text}", + "promptValues": "" + }, + "outputAnchors": [ + { + "id": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable", + "name": "chatPromptTemplate", + "label": "ChatPromptTemplate", + "type": "ChatPromptTemplate | BaseChatPromptTemplate | BasePromptTemplate | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 493.26582927222483, + "y": -156.20470841335592 + }, + "dragging": false + }, + { + "width": 300, + "height": 576, + "id": "chatOpenAI_0", + "position": { + "x": 860.555928011636, + "y": -355.71028569475095 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_0", + "label": "ChatOpenAI", + "version": 3, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect 
Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0125", + "name": "gpt-3.5-turbo-0125" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + 
"id": "chatOpenAI_0-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-baseOptions-json" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_0-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "", + "temperature": "0", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 860.555928011636, + "y": -355.71028569475095 + }, + "dragging": false + }, + { + "width": 300, + "height": 454, + "id": "advancedStructuredOutputParser_0", + "position": { + "x": 489.3637511211284, + "y": 580.0628053662244 + }, + "type": "customNode", + "data": { + "id": "advancedStructuredOutputParser_0", + "label": "Advanced Structured Output Parser", + "version": 1, + "name": "advancedStructuredOutputParser", + "type": "AdvancedStructuredOutputParser", + "baseClasses": 
["AdvancedStructuredOutputParser", "BaseLLMOutputParser", "Runnable"], + "category": "Output Parsers", + "description": "Parse the output of an LLM call into a given structure by providing a Zod schema.", + "inputParams": [ + { + "label": "Autofix", + "name": "autofixParser", + "type": "boolean", + "optional": true, + "description": "In the event that the first call fails, will make another call to the model to fix any errors.", + "id": "advancedStructuredOutputParser_0-input-autofixParser-boolean" + }, + { + "label": "Example JSON", + "name": "exampleJson", + "type": "string", + "description": "Zod schema for the output of the model", + "rows": 10, + "default": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})", + "id": "advancedStructuredOutputParser_0-input-exampleJson-string" + } + ], + "inputAnchors": [], + "inputs": { + "autofixParser": "", + "exampleJson": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})" + }, + "outputAnchors": [ + { + "id": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", + "name": "advancedStructuredOutputParser", + "label": 
"AdvancedStructuredOutputParser", + "type": "AdvancedStructuredOutputParser | BaseLLMOutputParser | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 489.3637511211284, + "y": 580.0628053662244 + } + } + ], + "edges": [ + { + "source": "chatPromptTemplate_0", + "sourceHandle": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": "chatPromptTemplate_0-chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_0", + "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel" + }, + { + "source": "advancedStructuredOutputParser_0", + "sourceHandle": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser", + "type": "buttonedge", + "id": "advancedStructuredOutputParser_0-advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser" + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Antonym.json b/packages/server/marketplaces/chatflows/Antonym.json index ef997feb..97c5af71 100644 --- 
a/packages/server/marketplaces/chatflows/Antonym.json +++ b/packages/server/marketplaces/chatflows/Antonym.json @@ -1,5 +1,7 @@ { "description": "Output antonym of given user input using few-shot prompt template built with examples", + "categories": "Few Shot Prompt,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json index 4edbf823..c0ed0807 100644 --- a/packages/server/marketplaces/chatflows/AutoGPT.json +++ b/packages/server/marketplaces/chatflows/AutoGPT.json @@ -1,5 +1,7 @@ { "description": "Use AutoGPT - Autonomous agent with chain of thoughts for self-guided task completion", + "categories": "AutoGPT,SERP Tool,File Read/Write,ChatOpenAI,Pinecone,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json index 3137d511..14976ad3 100644 --- a/packages/server/marketplaces/chatflows/BabyAGI.json +++ b/packages/server/marketplaces/chatflows/BabyAGI.json @@ -1,5 +1,7 @@ { "description": "Use BabyAGI to create tasks and reprioritize for a given objective", + "categories": "BabyAGI,ChatOpenAI,Pinecone,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/CSV Agent.json b/packages/server/marketplaces/chatflows/CSV Agent.json index e16377d2..3439625b 100644 --- a/packages/server/marketplaces/chatflows/CSV Agent.json +++ b/packages/server/marketplaces/chatflows/CSV Agent.json @@ -1,5 +1,7 @@ { "description": "Analyse and summarize CSV data", + "categories": "CSV Agent,ChatOpenAI,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Chat with a Podcast.json b/packages/server/marketplaces/chatflows/Chat with a Podcast.json index f8d8d26c..c87b3f2c 100644 --- 
a/packages/server/marketplaces/chatflows/Chat with a Podcast.json +++ b/packages/server/marketplaces/chatflows/Chat with a Podcast.json @@ -1,5 +1,7 @@ { "description": "Engage with data sources such as YouTube Transcripts, Google, and more through intelligent Q&A interactions", + "categories": "Memory Vector Store,SearchAPI,ChatOpenAI,Conversational Retrieval QA Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json index 12bea993..3777b637 100644 --- a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json +++ b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json @@ -1,5 +1,7 @@ { "description": "Use ChatGPT Plugins within LangChain abstractions with GET and POST Tools", + "categories": "ChatGPT Plugin,HTTP GET/POST,ChatOpenAI,MRKL Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Claude LLM.json b/packages/server/marketplaces/chatflows/Claude LLM.json index 7b32de48..48be286d 100644 --- a/packages/server/marketplaces/chatflows/Claude LLM.json +++ b/packages/server/marketplaces/chatflows/Claude LLM.json @@ -1,5 +1,7 @@ { "description": "Use Anthropic Claude with 200k context window to ingest whole document for QnA", + "categories": "Buffer Memory,Prompt Template,Conversation Chain,ChatAnthropic,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, @@ -439,10 +441,10 @@ "type": "options", "options": [ { - "id": "plainText_0-output-document-Document", + "id": "plainText_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "plainText_0-output-text-string|json", diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json new file mode 100644 index 
00000000..3f1152f2 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -0,0 +1,919 @@ +{ + "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex", + "categories": "Text File,Prompt Template,ChatOpenAI,Conversation Chain,Pinecone,LlamaIndex,Redis", + "framework": "LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 438, + "id": "textFile_0", + "position": { + "x": 221.215421786192, + "y": 94.91489477412404 + }, + "type": "customNode", + "data": { + "id": "textFile_0", + "label": "Text File", + "version": 3, + "name": "textFile", + "type": "Document", + "baseClasses": ["Document"], + "category": "Document Loaders", + "description": "Load data from text files", + "inputParams": [ + { + "label": "Txt File", + "name": "txtFile", + "type": "file", + "fileType": ".txt, .html, .aspx, .asp, .cpp, .c, .cs, .css, .go, .h, .java, .js, .less, .ts, .php, .proto, .python, .py, .rst, .ruby, .rb, .rs, .scala, .sc, .scss, .sol, .sql, .swift, .markdown, .md, .tex, .ltx, .vb, .xml", + "id": "textFile_0-input-txtFile-file" + }, + { + "label": "Metadata", + "name": "metadata", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "textFile_0-input-metadata-json" + } + ], + "inputAnchors": [ + { + "label": "Text Splitter", + "name": "textSplitter", + "type": "TextSplitter", + "optional": true, + "id": "textFile_0-input-textSplitter-TextSplitter" + } + ], + "inputs": { + "textSplitter": "{{recursiveCharacterTextSplitter_0.data.instance}}", + "metadata": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "textFile_0-output-document-Document|json", + "name": "document", + "label": "Document", + "type": "Document | json" + }, + { + "id": "textFile_0-output-text-string|json", + "name": "text", + "label": "Text", + "type": "string | json" + } + ], + "default": 
"document" + } + ], + "outputs": { + "output": "document" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 221.215421786192, + "y": 94.91489477412404 + }, + "dragging": false + }, + { + "width": 300, + "height": 429, + "id": "recursiveCharacterTextSplitter_0", + "position": { + "x": -203.4868320229876, + "y": 101.32475976329766 + }, + "type": "customNode", + "data": { + "id": "recursiveCharacterTextSplitter_0", + "label": "Recursive Character Text Splitter", + "version": 2, + "name": "recursiveCharacterTextSplitter", + "type": "RecursiveCharacterTextSplitter", + "baseClasses": ["RecursiveCharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer", "Runnable"], + "category": "Text Splitters", + "description": "Split documents recursively by different characters - starting with \"\\n\\n\", then \"\\n\", then \" \"", + "inputParams": [ + { + "label": "Chunk Size", + "name": "chunkSize", + "type": "number", + "default": 1000, + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-chunkSize-number" + }, + { + "label": "Chunk Overlap", + "name": "chunkOverlap", + "type": "number", + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-chunkOverlap-number" + }, + { + "label": "Custom Separators", + "name": "separators", + "type": "string", + "rows": 4, + "description": "Array of custom separators to determine when to split the text, will override the default separators", + "placeholder": "[\"|\", \"##\", \">\", \"-\"]", + "additionalParams": true, + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-separators-string" + } + ], + "inputAnchors": [], + "inputs": { + "chunkSize": 1000, + "chunkOverlap": "", + "separators": "" + }, + "outputAnchors": [ + { + "id": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable", + "name": "recursiveCharacterTextSplitter", + "label": 
"RecursiveCharacterTextSplitter", + "type": "RecursiveCharacterTextSplitter | TextSplitter | BaseDocumentTransformer | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -203.4868320229876, + "y": 101.32475976329766 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": 176.27434578083106, + "y": 953.3664298122493 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": 
"openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 176.27434578083106, + "y": 953.3664298122493 + }, + "dragging": false + }, + { + "width": 300, + "height": 585, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 609.3087433345761, + "y": 488.2141798951578 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. 
Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": ["{{textFile_0.data.instance}}"], + "model": "{{chatOpenAI_LlamaIndex_1.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "", + "pineconeNamespace": "", + "pineconeMetadataFilter": "", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 609.3087433345761, + "y": 488.2141798951578 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_1", + "position": { + "x": -195.15244974578656, + "y": 584.9467028201428 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_1", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + 
"baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": 
true, + "id": "chatOpenAI_LlamaIndex_1-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -195.15244974578656, + "y": 584.9467028201428 + }, + "dragging": false + }, + { + "width": 300, + "height": 513, + "id": "contextChatEngine_0", + "position": { + "x": 1550.2553933740128, + "y": 270.7914631777829 + }, + "type": "customNode", + "data": { + "id": "contextChatEngine_0", + "label": "Context Chat Engine", + "version": 1, + "name": "contextChatEngine", + "type": "ContextChatEngine", + "baseClasses": ["ContextChatEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "contextChatEngine_0-input-returnSourceDocuments-boolean" + }, + { + "label": "System Message", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", + "id": "contextChatEngine_0-input-systemMessagePrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Vector Store Retriever", + "name": "vectorStoreRetriever", + "type": "VectorIndexRetriever", + "id": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "contextChatEngine_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "model": "{{chatOpenAI_LlamaIndex_2.data.instance}}", + "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", + "memory": "{{RedisBackedChatMemory_0.data.instance}}", + "systemMessagePrompt": "", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "contextChatEngine_0-output-contextChatEngine-ContextChatEngine", + "name": "contextChatEngine", + "label": "ContextChatEngine", + "type": "ContextChatEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1550.2553933740128, + "y": 270.7914631777829 + }, + "dragging": false + }, + { + "width": 300, + "height": 329, + "id": "RedisBackedChatMemory_0", + "position": { + "x": 1081.252815805786, + "y": 990.1701092562037 + }, + "type": "customNode", + "data": { + "id": "RedisBackedChatMemory_0", + "label": "Redis-Backed Chat Memory", + "version": 2, + "name": "RedisBackedChatMemory", + "type": "RedisBackedChatMemory", + "baseClasses": ["RedisBackedChatMemory", "BaseChatMemory", "BaseMemory"], + "category": "Memory", + "description": "Summarizes the conversation and stores the memory in Redis server", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "optional": true, + "credentialNames": ["redisCacheApi", "redisCacheUrlApi"], + "id": 
"RedisBackedChatMemory_0-input-credential-credential" + }, + { + "label": "Session Id", + "name": "sessionId", + "type": "string", + "description": "If not specified, a random id will be used. Learn more", + "default": "", + "additionalParams": true, + "optional": true, + "id": "RedisBackedChatMemory_0-input-sessionId-string" + }, + { + "label": "Session Timeouts", + "name": "sessionTTL", + "type": "number", + "description": "Omit this parameter to make sessions never expire", + "additionalParams": true, + "optional": true, + "id": "RedisBackedChatMemory_0-input-sessionTTL-number" + }, + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "additionalParams": true, + "id": "RedisBackedChatMemory_0-input-memoryKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "sessionId": "", + "sessionTTL": "", + "memoryKey": "chat_history" + }, + "outputAnchors": [ + { + "id": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory", + "name": "RedisBackedChatMemory", + "label": "RedisBackedChatMemory", + "type": "RedisBackedChatMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1081.252815805786, + "y": 990.1701092562037 + } + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_2", + "position": { + "x": 1015.1605888108386, + "y": -38.31143117572401 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_2", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + 
"id": "chatOpenAI_LlamaIndex_2-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_2-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_2-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo", + 
"temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1015.1605888108386, + "y": -38.31143117572401 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "recursiveCharacterTextSplitter_0", + "sourceHandle": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable", + "target": "textFile_0", + "targetHandle": "textFile_0-input-textSplitter-TextSplitter", + "type": "buttonedge", + "id": "recursiveCharacterTextSplitter_0-recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable-textFile_0-textFile_0-input-textSplitter-TextSplitter", + "data": { + "label": "" + } + }, + { + "source": "textFile_0", + "sourceHandle": "textFile_0-output-document-Document|json", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-document-Document", + "type": "buttonedge", + "id": "textFile_0-textFile_0-output-document-Document|json-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_LlamaIndex_1", + "sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + 
"data": { + "label": "" + } + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-contextChatEngine_0-contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "data": { + "label": "" + } + }, + { + "source": "RedisBackedChatMemory_0", + "sourceHandle": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-contextChatEngine_0-contextChatEngine_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_LlamaIndex_2", + "sourceHandle": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": 
"chatOpenAI_LlamaIndex_2-chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-contextChatEngine_0-contextChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Conversational Agent.json b/packages/server/marketplaces/chatflows/Conversational Agent.json index 031a29c0..4cb736a0 100644 --- a/packages/server/marketplaces/chatflows/Conversational Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Agent.json @@ -1,5 +1,7 @@ { "description": "A conversational agent for a chat model which utilize chat specific prompts", + "categories": "Calculator Tool,Buffer Memory,SerpAPI,ChatOpenAI,Conversational Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json index 40c689f5..a4ec6b5b 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json @@ -1,6 +1,8 @@ { "description": "Agent optimized for vector retrieval during conversation and answering questions based on previous dialogue.", + "categories": "Retriever Tool,Buffer Memory,ChatOpenAI,Conversational Retrieval Agent, Pinecone,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index e73a9d28..f76e89e6 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -1,6 +1,8 @@ { "description": "Text file QnA using conversational retrieval QA chain", + "categories": 
"TextFile,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, @@ -233,10 +235,10 @@ "type": "options", "options": [ { - "id": "textFile_0-output-document-Document", + "id": "textFile_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "textFile_0-output-text-string|json", @@ -730,11 +732,11 @@ }, { "source": "textFile_0", - "sourceHandle": "textFile_0-output-document-Document", + "sourceHandle": "textFile_0-output-document-Document|json", "target": "pinecone_0", "targetHandle": "pinecone_0-input-document-Document", "type": "buttonedge", - "id": "textFile_0-textFile_0-output-document-Document-pinecone_0-pinecone_0-input-document-Document", + "id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json index 6975fc68..31d65c48 100644 --- a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json +++ b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json @@ -1,6 +1,8 @@ { "description": "Flowise Docs Github QnA using conversational retrieval QA chain", + "categories": "Memory Vector Store,Github Loader,ChatOpenAI,Conversational Retrieval QA Chain,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json b/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json index 93009574..6e7154b7 100644 --- a/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json +++ b/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json @@ -1,5 +1,7 @@ { "description": "Simple LLM Chain using HuggingFace Inference API on falcon-7b-instruct model", + "categories": 
"HuggingFace,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/IfElse.json b/packages/server/marketplaces/chatflows/IfElse.json index f3fddebf..e3b66f44 100644 --- a/packages/server/marketplaces/chatflows/IfElse.json +++ b/packages/server/marketplaces/chatflows/IfElse.json @@ -1,5 +1,7 @@ { "description": "Split flows based on if else condition", + "categories": "IfElse Function,ChatOpenAI,OpenAI,LLM Chain,Langchain", + "framework": "Langchain", "badge": "new", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Image Generation.json b/packages/server/marketplaces/chatflows/Image Generation.json index 7dafcedf..46cb79ec 100644 --- a/packages/server/marketplaces/chatflows/Image Generation.json +++ b/packages/server/marketplaces/chatflows/Image Generation.json @@ -1,6 +1,8 @@ { "description": "Generate image using Replicate Stability text-to-image generative AI model", "badge": "NEW", + "categories": "Replicate,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Input Moderation.json b/packages/server/marketplaces/chatflows/Input Moderation.json index ed823a21..bd449777 100644 --- a/packages/server/marketplaces/chatflows/Input Moderation.json +++ b/packages/server/marketplaces/chatflows/Input Moderation.json @@ -1,6 +1,8 @@ { "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", "badge": "NEW", + "categories": "Moderation,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/List Output Parser.json b/packages/server/marketplaces/chatflows/List Output Parser.json index eaf56dff..0eb269b4 100644 --- a/packages/server/marketplaces/chatflows/List Output Parser.json +++ b/packages/server/marketplaces/chatflows/List Output Parser.json @@ -1,6 +1,8 
@@ { "description": "Return response as a list (array) instead of a string/text", "badge": "NEW", + "categories": "CSV Output Parser,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Local QnA.json b/packages/server/marketplaces/chatflows/Local QnA.json index 6f78cb05..3e8b93f6 100644 --- a/packages/server/marketplaces/chatflows/Local QnA.json +++ b/packages/server/marketplaces/chatflows/Local QnA.json @@ -1,6 +1,8 @@ { "description": "QnA chain using Ollama local LLM, LocalAI embedding model, and Faiss local vector store", "badge": "POPULAR", + "categories": "Text File,ChatOllama,Conversational Retrieval QA Chain,Faiss,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, @@ -224,10 +226,10 @@ "type": "options", "options": [ { - "id": "textFile_0-output-document-Document", + "id": "textFile_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "textFile_0-output-text-string|json", @@ -649,11 +651,11 @@ }, { "source": "textFile_0", - "sourceHandle": "textFile_0-output-document-Document", + "sourceHandle": "textFile_0-output-document-Document|json", "target": "faiss_0", "targetHandle": "faiss_0-input-document-Document", "type": "buttonedge", - "id": "textFile_0-textFile_0-output-document-Document-faiss_0-faiss_0-input-document-Document", + "id": "textFile_0-textFile_0-output-document-Document|json-faiss_0-faiss_0-input-document-Document", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index 1b3e48e1..c5681d3d 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -1,5 +1,7 @@ { "description": "Use long term memory like Zep to differentiate conversations between users with sessionId", + 
"categories": "ChatOpenAI,Conversational Retrieval QA Chain,Zep Memory,Qdrant,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index ef928854..ed2efb9f 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -1,6 +1,8 @@ { "description": "Upsert multiple files with metadata and filter by it using conversational retrieval QA chain", + "categories": "Text File,PDF File,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, @@ -126,10 +128,10 @@ "type": "options", "options": [ { - "id": "textFile_0-output-document-Document", + "id": "textFile_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "textFile_0-output-text-string|json", @@ -836,11 +838,11 @@ }, { "source": "textFile_0", - "sourceHandle": "textFile_0-output-document-Document", + "sourceHandle": "textFile_0-output-document-Document|json", "target": "pinecone_0", "targetHandle": "pinecone_0-input-document-Document", "type": "buttonedge", - "id": "textFile_0-textFile_0-output-document-Document-pinecone_0-pinecone_0-input-document-Document", + "id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json index 314e24a6..97cca308 100644 --- a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json @@ -1,5 +1,7 @@ { "description": "A chain that automatically picks an appropriate prompt from multiple prompts", + "categories": 
"ChatOpenAI,Multi Prompt Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json index 8c9e8537..6b8f2c33 100644 --- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json @@ -1,5 +1,7 @@ { "description": "A chain that automatically picks an appropriate retriever from multiple different vector databases", + "categories": "ChatOpenAI,Multi Retrieval QA Chain,Pinecone,Chroma,Supabase,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json index e5a16caa..b76270e7 100644 --- a/packages/server/marketplaces/chatflows/Multiple VectorDB.json +++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json @@ -1,5 +1,7 @@ { "description": "Use the agent to choose between multiple different vector databases, with the ability to use other tools", + "categories": "Buffer Memory,ChatOpenAI,Chain Tool,Retrieval QA Chain,Redis,Faiss,Conversational Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, @@ -964,10 +966,10 @@ "type": "options", "options": [ { - "id": "plainText_0-output-document-Document", + "id": "plainText_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "plainText_0-output-text-string|json", @@ -1501,10 +1503,10 @@ "type": "options", "options": [ { - "id": "plainText_1-output-document-Document", + "id": "plainText_1-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "plainText_1-output-text-string|json", @@ -1721,11 +1723,11 @@ }, { "source": "plainText_0", - 
"sourceHandle": "plainText_0-output-document-Document", + "sourceHandle": "plainText_0-output-document-Document|json", "target": "redis_0", "targetHandle": "redis_0-input-document-Document", "type": "buttonedge", - "id": "plainText_0-plainText_0-output-document-Document-redis_0-redis_0-input-document-Document", + "id": "plainText_0-plainText_0-output-document-Document|json-redis_0-redis_0-input-document-Document", "data": { "label": "" } @@ -1776,11 +1778,11 @@ }, { "source": "plainText_1", - "sourceHandle": "plainText_1-output-document-Document", + "sourceHandle": "plainText_1-output-document-Document|json", "target": "faiss_0", "targetHandle": "faiss_0-input-document-Document", "type": "buttonedge", - "id": "plainText_1-plainText_1-output-document-Document-faiss_0-faiss_0-input-document-Document", + "id": "plainText_1-plainText_1-output-document-Document|json-faiss_0-faiss_0-input-document-Document", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/OpenAI Agent.json b/packages/server/marketplaces/chatflows/OpenAI Agent.json index e3e80dcc..6f35e595 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Agent.json +++ b/packages/server/marketplaces/chatflows/OpenAI Agent.json @@ -1,5 +1,7 @@ { "description": "An agent that uses OpenAI's Function Calling functionality to pick the tool and args to call", + "categories": "Buffer Memory,Custom Tool, SerpAPI,OpenAI Function,Calculator Tool,ChatOpenAI,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/OpenAI Assistant.json b/packages/server/marketplaces/chatflows/OpenAI Assistant.json index e9311c97..73c01413 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Assistant.json +++ b/packages/server/marketplaces/chatflows/OpenAI Assistant.json @@ -1,5 +1,7 @@ { "description": "OpenAI Assistant that has instructions and can leverage models, tools, and knowledge to respond to user queries", + "categories": "Custom Tool, 
SerpAPI,OpenAI Assistant,Calculator Tool,Langchain", + "framework": "Langchain", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json index c2060e79..d225e41a 100644 --- a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json +++ b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json @@ -1,6 +1,8 @@ { "description": "Use chat history to rephrase user question, and answer the rephrased question using retrieved docs from vector store", + "categories": "ChatOpenAI,LLM Chain,SingleStore,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining.json b/packages/server/marketplaces/chatflows/Prompt Chaining.json index 267d8222..42debac8 100644 --- a/packages/server/marketplaces/chatflows/Prompt Chaining.json +++ b/packages/server/marketplaces/chatflows/Prompt Chaining.json @@ -1,5 +1,7 @@ { "description": "Use output from a chain as prompt for another chain", + "categories": "Custom Tool,OpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Query Engine.json b/packages/server/marketplaces/chatflows/Query Engine.json new file mode 100644 index 00000000..b3a3c292 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -0,0 +1,549 @@ +{ + "description": "Stateless query engine designed to answer question over your data using LlamaIndex", + "categories": "ChatAnthropic,Compact and Refine,Pinecone,LlamaIndex", + "badge": "NEW", + "framework": "LlamaIndex", + "nodes": [ + { + "width": 300, + "height": 382, + "id": "queryEngine_0", + "position": { + "x": 1407.9610494306783, + "y": 241.12144405808692 + }, + "type": "customNode", + "data": { + "id": "queryEngine_0", + "label": "Query Engine", 
+ "version": 1, + "name": "queryEngine", + "type": "QueryEngine", + "baseClasses": ["QueryEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple query engine built to answer question over your data, without memory", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "queryEngine_0-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Retriever", + "name": "vectorStoreRetriever", + "type": "VectorIndexRetriever", + "id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever" + }, + { + "label": "Response Synthesizer", + "name": "responseSynthesizer", + "type": "ResponseSynthesizer", + "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See more", + "optional": true, + "id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + } + ], + "inputs": { + "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", + "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "queryEngine_0-output-queryEngine-QueryEngine", + "name": "queryEngine", + "label": "QueryEngine", + "type": "QueryEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1407.9610494306783, + "y": 241.12144405808692 + }, + "dragging": false + }, + { + "width": 300, + "height": 585, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 977.3886641397302, + "y": -261.2253031641797 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and 
perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": "", + "model": "{{chatAnthropic_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "", + "pineconeNamespace": "", + "pineconeMetadataFilter": "", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": 
"Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 977.3886641397302, + "y": -261.2253031641797 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": 529.8690713844503, + "y": -18.955726653613254 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": 
"openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 529.8690713844503, + "y": -18.955726653613254 + }, + "dragging": false + }, + { + "width": 300, + "height": 749, + "id": "compactrefineLlamaIndex_0", + "position": { + "x": 170.71031618977543, + "y": -33.83233752386292 + }, + "type": "customNode", + "data": { + "id": "compactrefineLlamaIndex_0", + "label": "Compact and Refine", + "version": 1, + "name": "compactrefineLlamaIndex", + "type": "CompactRefine", + "baseClasses": ["CompactRefine", "ResponseSynthesizer"], + "tags": ["LlamaIndex"], + "category": "Response Synthesizer", + "description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.", + "inputParams": [ + { + "label": "Refine Prompt", + "name": "refinePrompt", + "type": "string", + "rows": 4, + "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "warning": "Prompt can contains no variables, or up to 3 variables. 
Variables must be {existingAnswer}, {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-refinePrompt-string" + }, + { + "label": "Text QA Prompt", + "name": "textQAPrompt", + "type": "string", + "rows": 4, + "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:", + "warning": "Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string" + } + ], + "inputAnchors": [], + "inputs": { + "refinePrompt": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. 
If the context isn't useful, return the original answer.\nRefined Answer:", + "textQAPrompt": "Context information:\n\n{context}\n\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}" + }, + "outputAnchors": [ + { + "id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "name": "compactrefineLlamaIndex", + "label": "CompactRefine", + "type": "CompactRefine | ResponseSynthesizer" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 170.71031618977543, + "y": -33.83233752386292 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatAnthropic_LlamaIndex_0", + "position": { + "x": 521.3530883359147, + "y": -584.8241219614786 + }, + "type": "customNode", + "data": { + "id": "chatAnthropic_LlamaIndex_0", + "label": "ChatAnthropic", + "version": 1, + "name": "chatAnthropic_LlamaIndex", + "type": "ChatAnthropic", + "baseClasses": ["ChatAnthropic", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around ChatAnthropic LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["anthropicApi"], + "id": "chatAnthropic_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "claude-2", + "name": "claude-2", + "description": "Claude 2 latest major version, automatically get updates to the model as they are released" + }, + { + "label": "claude-2.1", + "name": "claude-2.1", + "description": "Claude 2 latest full version" + }, + { + "label": "claude-instant-1", + "name": "claude-instant-1", + "description": "Claude Instant latest major version, automatically get updates to the model as they are released" + }, + { + "label": "claude-v1", + "name": "claude-v1" + }, + { + "label": 
"claude-v1-100k", + "name": "claude-v1-100k" + }, + { + "label": "claude-v1.0", + "name": "claude-v1.0" + }, + { + "label": "claude-v1.2", + "name": "claude-v1.2" + }, + { + "label": "claude-v1.3", + "name": "claude-v1.3" + }, + { + "label": "claude-v1.3-100k", + "name": "claude-v1.3-100k" + }, + { + "label": "claude-instant-v1", + "name": "claude-instant-v1" + }, + { + "label": "claude-instant-v1-100k", + "name": "claude-instant-v1-100k" + }, + { + "label": "claude-instant-v1.0", + "name": "claude-instant-v1.0" + }, + { + "label": "claude-instant-v1.1", + "name": "claude-instant-v1.1" + }, + { + "label": "claude-instant-v1.1-100k", + "name": "claude-instant-v1.1-100k" + } + ], + "default": "claude-2", + "optional": true, + "id": "chatAnthropic_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatAnthropic_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokensToSample", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_LlamaIndex_0-input-maxTokensToSample-number" + }, + { + "label": "Top P", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_LlamaIndex_0-input-topP-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "claude-2", + "temperature": 0.9, + "maxTokensToSample": "", + "topP": "" + }, + "outputAnchors": [ + { + "id": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex", + "name": "chatAnthropic_LlamaIndex", + "label": "ChatAnthropic", + "type": "ChatAnthropic | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 521.3530883359147, + "y": -584.8241219614786 + }, + "dragging": false + } + ], + "edges": [ + { + "source": 
"pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "target": "queryEngine_0", + "targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "data": { + "label": "" + } + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "compactrefineLlamaIndex_0", + "sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "target": "queryEngine_0", + "targetHandle": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "type": "buttonedge", + "id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-queryEngine_0-queryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "data": { + "label": "" + } + }, + { + "source": "chatAnthropic_LlamaIndex_0", + "sourceHandle": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": 
"chatAnthropic_LlamaIndex_0-chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/ReAct Agent.json b/packages/server/marketplaces/chatflows/ReAct Agent.json index e4a7fab8..5fd191fe 100644 --- a/packages/server/marketplaces/chatflows/ReAct Agent.json +++ b/packages/server/marketplaces/chatflows/ReAct Agent.json @@ -1,5 +1,7 @@ { "description": "An agent that uses ReAct logic to decide what action to take", + "categories": "Calculator Tool,SerpAPI,ChatOpenAI,MRKL Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Replicate LLM.json b/packages/server/marketplaces/chatflows/Replicate LLM.json index 832e85c7..578983cf 100644 --- a/packages/server/marketplaces/chatflows/Replicate LLM.json +++ b/packages/server/marketplaces/chatflows/Replicate LLM.json @@ -1,5 +1,7 @@ { "description": "Use Replicate API that runs Llama 13b v2 model with LLMChain", + "categories": "Replicate,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/SQL DB Chain.json b/packages/server/marketplaces/chatflows/SQL DB Chain.json index 92e42178..ec9d465d 100644 --- a/packages/server/marketplaces/chatflows/SQL DB Chain.json +++ b/packages/server/marketplaces/chatflows/SQL DB Chain.json @@ -1,5 +1,7 @@ { "description": "Answer questions over a SQL database", + "categories": "ChatOpenAI,Sql Database Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/SQL Prompt.json b/packages/server/marketplaces/chatflows/SQL Prompt.json index 406c2e52..8d2691c6 100644 --- a/packages/server/marketplaces/chatflows/SQL Prompt.json +++ b/packages/server/marketplaces/chatflows/SQL 
Prompt.json @@ -1,5 +1,7 @@ { "description": "Manually construct prompts to query a SQL database", + "categories": "IfElse Function,Variable Set/Get,Custom JS Function,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "badge": "new", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Simple Chat Engine.json b/packages/server/marketplaces/chatflows/Simple Chat Engine.json new file mode 100644 index 00000000..fd17ded1 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Simple Chat Engine.json @@ -0,0 +1,272 @@ +{ + "description": "Simple chat engine to handle back and forth conversations using LlamaIndex", + "categories": "BufferMemory,AzureChatOpenAI,LlamaIndex", + "framework": "LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 462, + "id": "simpleChatEngine_0", + "position": { + "x": 1210.127368000538, + "y": 324.98110560103896 + }, + "type": "customNode", + "data": { + "id": "simpleChatEngine_0", + "label": "Simple Chat Engine", + "version": 1, + "name": "simpleChatEngine", + "type": "SimpleChatEngine", + "baseClasses": ["SimpleChatEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple engine to handle back and forth conversations", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "placeholder": "You are a helpful assistant", + "id": "simpleChatEngine_0-input-systemMessagePrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "simpleChatEngine_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "model": "{{azureChatOpenAI_LlamaIndex_0.data.instance}}", + "memory": "{{bufferMemory_0.data.instance}}", + "systemMessagePrompt": "You are a helpful assistant." 
+ }, + "outputAnchors": [ + { + "id": "simpleChatEngine_0-output-simpleChatEngine-SimpleChatEngine", + "name": "simpleChatEngine", + "label": "SimpleChatEngine", + "type": "SimpleChatEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1210.127368000538, + "y": 324.98110560103896 + } + }, + { + "width": 300, + "height": 376, + "id": "bufferMemory_0", + "position": { + "x": 393.9823478014782, + "y": 415.7414943210391 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 1, + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"], + "category": "Memory", + "description": "Remembers previous conversational back and forths directly", + "inputParams": [ + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "id": "bufferMemory_0-input-memoryKey-string" + }, + { + "label": "Input Key", + "name": "inputKey", + "type": "string", + "default": "input", + "id": "bufferMemory_0-input-inputKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "memoryKey": "chat_history", + "inputKey": "input" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 393.9823478014782, + "y": 415.7414943210391 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "azureChatOpenAI_LlamaIndex_0", + "position": { + "x": 746.5530862509605, + "y": -54.107978373323306 + }, + "type": "customNode", + "data": { + "id": "azureChatOpenAI_LlamaIndex_0", + "label": "AzureChatOpenAI", + "version": 1, + "name": "azureChatOpenAI_LlamaIndex", + "type": "AzureChatOpenAI", + 
"baseClasses": ["AzureChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around Azure OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["azureOpenAIApi"], + "id": "azureChatOpenAI_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + } + ], + "default": "gpt-3.5-turbo-16k", + "optional": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex", + "name": "azureChatOpenAI_LlamaIndex", + "label": 
"AzureChatOpenAI", + "type": "AzureChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 746.5530862509605, + "y": -54.107978373323306 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "simpleChatEngine_0", + "targetHandle": "simpleChatEngine_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-simpleChatEngine_0-simpleChatEngine_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "azureChatOpenAI_LlamaIndex_0", + "sourceHandle": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex", + "target": "simpleChatEngine_0", + "targetHandle": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "azureChatOpenAI_LlamaIndex_0-azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex-simpleChatEngine_0-simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json index 1ffbee44..53cfeace 100644 --- a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json +++ b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json @@ -1,5 +1,7 @@ { "description": "Basic example of Conversation Chain with built-in memory - works exactly like ChatGPT", + "categories": "Buffer Memory,ChatOpenAI,Conversation Chain,Langchain", + "framework": "Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Simple LLM Chain.json 
b/packages/server/marketplaces/chatflows/Simple LLM Chain.json index f2e3a4a2..36a5a8d8 100644 --- a/packages/server/marketplaces/chatflows/Simple LLM Chain.json +++ b/packages/server/marketplaces/chatflows/Simple LLM Chain.json @@ -1,5 +1,7 @@ { "description": "Basic example of stateless (no memory) LLM Chain with a Prompt Template and LLM Model", + "categories": "OpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Structured Output Parser.json b/packages/server/marketplaces/chatflows/Structured Output Parser.json index 92336443..9801a90f 100644 --- a/packages/server/marketplaces/chatflows/Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Structured Output Parser.json @@ -1,5 +1,7 @@ { "description": "Return response as a specified JSON structure instead of a string/text", + "categories": "Structured Output Parser,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json new file mode 100644 index 00000000..620712c4 --- /dev/null +++ b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json @@ -0,0 +1,1203 @@ +{ + "description": "Breaks down query into sub questions for each relevant data source, then combine into final response", + "categories": "Sub Question Query Engine,Sticky Note,QueryEngine Tool,Compact and Refine,ChatOpenAI,Pinecone,LlamaIndex", + "framework": "LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 749, + "id": "compactrefineLlamaIndex_0", + "position": { + "x": -1214.7329938486841, + "y": 56.52482754447425 + }, + "type": "customNode", + "data": { + "id": "compactrefineLlamaIndex_0", + "label": "Compact and Refine", + "version": 1, + "name": "compactrefineLlamaIndex", + "type": "CompactRefine", + "baseClasses": 
["CompactRefine", "ResponseSynthesizer"], + "tags": ["LlamaIndex"], + "category": "Response Synthesizer", + "description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.", + "inputParams": [ + { + "label": "Refine Prompt", + "name": "refinePrompt", + "type": "string", + "rows": 4, + "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "warning": "Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-refinePrompt-string" + }, + { + "label": "Text QA Prompt", + "name": "textQAPrompt", + "type": "string", + "rows": 4, + "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:", + "warning": "Prompt can contain no variables, or up to 2 variables. 
Variables must be {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string" + } + ], + "inputAnchors": [], + "inputs": { + "refinePrompt": "A user has selected a set of SEC filing documents and has asked a question about them.\nThe SEC documents have the following titles:\n- Apple Inc (APPL) FORM 10K 2022\n- Tesla Inc (TSLA) FORM 10K 2022\nThe original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "textQAPrompt": "A user has selected a set of SEC filing documents and has asked a question about them.\nThe SEC documents have the following titles:\n- Apple Inc (APPL) FORM 10K 2022\n- Tesla Inc (TSLA) FORM 10K 2022\nContext information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:" + }, + "outputAnchors": [ + { + "id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "name": "compactrefineLlamaIndex", + "label": "CompactRefine", + "type": "CompactRefine | ResponseSynthesizer" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -1214.7329938486841, + "y": 56.52482754447425 + }, + "dragging": false + }, + { + "width": 300, + "height": 611, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 37.23548045607484, + "y": -119.7364648743818 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], 
+ "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. 
Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": [], + "model": "{{chatOpenAI_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "flowiseindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"tesla\"}", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "vectorStore" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 37.23548045607484, + "y": -119.7364648743818 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_0", + "position": { + "x": -455.232655468177, + "y": -711.0080711676725 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_0", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + 
"type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex", "BaseLLM"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + 
"type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex | BaseLLM" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -455.232655468177, + "y": -711.0080711676725 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": -451.0082548287243, + "y": -127.15143353229783 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": 
"openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -451.0082548287243, + "y": -127.15143353229783 + } + }, + { + "width": 300, + "height": 511, + "id": "queryEngineToolLlamaIndex_0", + "position": { + "x": 460.37559236135905, + "y": -565.6224030941121 + }, + "type": "customNode", + "data": { + "id": "queryEngineToolLlamaIndex_0", + "label": "QueryEngine Tool", + "version": 1, + "name": "queryEngineToolLlamaIndex", + "type": "QueryEngineTool", + "baseClasses": ["QueryEngineTool"], + "tags": ["LlamaIndex"], + "category": "Tools", + "description": "Execute actions using ChatGPT Plugin Url", + "inputParams": [ + { + "label": "Tool Name", + "name": "toolName", + "type": "string", + "description": "Tool name must be small capital letter with underscore. Ex: my_tool", + "id": "queryEngineToolLlamaIndex_0-input-toolName-string" + }, + { + "label": "Tool Description", + "name": "toolDesc", + "type": "string", + "rows": 4, + "id": "queryEngineToolLlamaIndex_0-input-toolDesc-string" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Index", + "name": "vectorStoreIndex", + "type": "VectorStoreIndex", + "id": "queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex" + } + ], + "inputs": { + "vectorStoreIndex": "{{pineconeLlamaIndex_1.data.instance}}", + "toolName": "apple_tool", + "toolDesc": "A SEC Form 10K filing describing the financials of Apple Inc (APPL) for the 2022 time period." 
+ }, + "outputAnchors": [ + { + "id": "queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool", + "name": "queryEngineToolLlamaIndex", + "label": "QueryEngineTool", + "type": "QueryEngineTool" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 460.37559236135905, + "y": -565.6224030941121 + }, + "dragging": false + }, + { + "width": 300, + "height": 611, + "id": "pineconeLlamaIndex_1", + "position": { + "x": 42.17855025460784, + "y": -839.8824444107056 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_1", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_1-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_1-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_1-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. 
Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": [], + "model": "{{chatOpenAI_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "flowiseindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"apple\"}", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_1-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "vectorStore" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 42.17855025460784, + "y": -839.8824444107056 + }, + "dragging": false + }, + { + "width": 300, + "height": 511, + "id": "queryEngineToolLlamaIndex_1", + "position": { + "x": 462.16721384216123, + "y": -17.750065363429798 + }, + "type": "customNode", + "data": { + "id": "queryEngineToolLlamaIndex_1", + "label": "QueryEngine Tool", + "version": 1, + "name": 
"queryEngineToolLlamaIndex", + "type": "QueryEngineTool", + "baseClasses": ["QueryEngineTool"], + "tags": ["LlamaIndex"], + "category": "Tools", + "description": "Execute actions using ChatGPT Plugin Url", + "inputParams": [ + { + "label": "Tool Name", + "name": "toolName", + "type": "string", + "description": "Tool name must be small capital letter with underscore. Ex: my_tool", + "id": "queryEngineToolLlamaIndex_1-input-toolName-string" + }, + { + "label": "Tool Description", + "name": "toolDesc", + "type": "string", + "rows": 4, + "id": "queryEngineToolLlamaIndex_1-input-toolDesc-string" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Index", + "name": "vectorStoreIndex", + "type": "VectorStoreIndex", + "id": "queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex" + } + ], + "inputs": { + "vectorStoreIndex": "{{pineconeLlamaIndex_0.data.instance}}", + "toolName": "tesla_tool", + "toolDesc": "A SEC Form 10K filing describing the financials of Tesla Inc (TSLA) for the 2022 time period." 
+ }, + "outputAnchors": [ + { + "id": "queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool", + "name": "queryEngineToolLlamaIndex", + "label": "QueryEngineTool", + "type": "QueryEngineTool" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 462.16721384216123, + "y": -17.750065363429798 + }, + "dragging": false + }, + { + "width": 300, + "height": 484, + "id": "subQuestionQueryEngine_0", + "position": { + "x": 982.7583030231563, + "y": 349.50858200305896 + }, + "type": "customNode", + "data": { + "id": "subQuestionQueryEngine_0", + "label": "Sub Question Query Engine", + "version": 1, + "name": "subQuestionQueryEngine", + "type": "SubQuestionQueryEngine", + "baseClasses": ["SubQuestionQueryEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple query engine built to answer question over your data, without memory", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "subQuestionQueryEngine_0-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "QueryEngine Tools", + "name": "queryEngineTools", + "type": "QueryEngineTool", + "list": true, + "id": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "label": "Response Synthesizer", + "name": "responseSynthesizer", + "type": "ResponseSynthesizer", + "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more", + "optional": true, + "id": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + } + ], + "inputs": { + "queryEngineTools": ["{{queryEngineToolLlamaIndex_1.data.instance}}", "{{queryEngineToolLlamaIndex_0.data.instance}}"], + "model": "{{chatOpenAI_LlamaIndex_1.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_1.data.instance}}", + "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "subQuestionQueryEngine_0-output-subQuestionQueryEngine-SubQuestionQueryEngine", + "name": "subQuestionQueryEngine", + "label": "SubQuestionQueryEngine", + "type": "SubQuestionQueryEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 982.7583030231563, + "y": 349.50858200305896 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_1", + "position": { + "x": -846.9087470244615, + "y": 23.446501495097493 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_1", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex", "BaseLLM"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": 
"gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex | BaseLLM" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -846.9087470244615, + "y": 23.446501495097493 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_1", + 
"position": { + "x": -437.3136244622061, + "y": 329.99986619821175 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_1", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_1-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -437.3136244622061, + "y": 329.99986619821175 + } + }, + { + "width": 300, + "height": 82, + "id": "stickyNote_0", + "position": { + "x": 35.90892935132143, + "y": -936.1282632923861 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + 
"label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_0-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"apple\"}" + }, + "outputAnchors": [ + { + "id": "stickyNote_0-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 35.90892935132143, + "y": -936.1282632923861 + }, + "dragging": false + }, + { + "width": 300, + "height": 82, + "id": "stickyNote_1", + "position": { + "x": 37.74909394815296, + "y": -215.17456133022054 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_1", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_1-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"tesla\"}" + }, + "outputAnchors": [ + { + "id": "stickyNote_1-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 37.74909394815296, + "y": -215.17456133022054 + }, + "dragging": false + }, + { + "width": 300, + "height": 163, + "id": "stickyNote_2", + "position": { + "x": 984.9543031068163, + "y": 171.04264459503852 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_2", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": 
"StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_2-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Break questions into subqueries, then retrieve corresponding context using queryengine tool.\n\nThis implementation does not contains memory, we can use OpenAI Agent to function call this flow" + }, + "outputAnchors": [ + { + "id": "stickyNote_2-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 984.9543031068163, + "y": 171.04264459503852 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "chatOpenAI_LlamaIndex_0", + "sourceHandle": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "pineconeLlamaIndex_1", + "targetHandle": "pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_0-chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-pineconeLlamaIndex_1-pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex" + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_1", + "targetHandle": "pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_1-pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + 
"source": "pineconeLlamaIndex_1", + "sourceHandle": "pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex", + "target": "queryEngineToolLlamaIndex_0", + "targetHandle": "queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex", + "type": "buttonedge", + "id": "pineconeLlamaIndex_1-pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex-queryEngineToolLlamaIndex_0-queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex" + }, + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex", + "target": "queryEngineToolLlamaIndex_1", + "targetHandle": "queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex-queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex" + }, + { + "source": "queryEngineToolLlamaIndex_1", + "sourceHandle": "queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool", + "type": "buttonedge", + "id": "queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "source": "queryEngineToolLlamaIndex_0", + "sourceHandle": "queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool", + "type": "buttonedge", + "id": "queryEngineToolLlamaIndex_0-queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "source": 
"chatOpenAI_LlamaIndex_1", + "sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "source": "openAIEmbedding_LlamaIndex_1", + "sourceHandle": "openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_1-openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "source": "compactrefineLlamaIndex_0", + "sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "type": "buttonedge", + "id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": 
"openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "source": "chatOpenAI_LlamaIndex_0", + "sourceHandle": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_0-chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Translator.json b/packages/server/marketplaces/chatflows/Translator.json index 0bf49252..5c8a3cc5 100644 --- a/packages/server/marketplaces/chatflows/Translator.json +++ b/packages/server/marketplaces/chatflows/Translator.json @@ -1,5 +1,7 @@ { "description": "Language translation using LLM Chain with a Chat Prompt Template and Chat Model", + "categories": "Chat Prompt Template,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json index d3bb5bf8..2ef1474a 100644 --- a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json +++ b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json @@ -1,4 +1,7 @@ { + "description": "QA chain for Vectara", + "categories": "Vectara QA Chain,Vectara,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json index 2376e29e..232bd83e 100644 --- a/packages/server/marketplaces/chatflows/WebBrowser.json +++ 
b/packages/server/marketplaces/chatflows/WebBrowser.json @@ -1,5 +1,7 @@ { "description": "Conversational Agent with ability to visit a website and extract information", + "categories": "Buffer Memory,Web Browser,ChatOpenAI,Conversational Agent", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index a5a53233..50806161 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -1,5 +1,7 @@ { "description": "Scrape web pages for QnA with long term memory Motorhead and return source documents", + "categories": "HtmlToMarkdown,Cheerio Web Scraper,ChatOpenAI,Redis,Pinecone,Langchain", + "framework": "Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/tools/Add Hubspot Contact.json b/packages/server/marketplaces/tools/Add Hubspot Contact.json index 584df4c3..f8715dcd 100644 --- a/packages/server/marketplaces/tools/Add Hubspot Contact.json +++ b/packages/server/marketplaces/tools/Add Hubspot Contact.json @@ -1,5 +1,6 @@ { "name": "add_contact_hubspot", + "framework": "Langchain", "description": "Add new contact to Hubspot", "color": "linear-gradient(rgb(85,198,123), rgb(0,230,99))", "iconSrc": "https://cdn.worldvectorlogo.com/logos/hubspot-1.svg", diff --git a/packages/server/marketplaces/tools/Create Airtable Record.json b/packages/server/marketplaces/tools/Create Airtable Record.json index c52c9199..5471b650 100644 --- a/packages/server/marketplaces/tools/Create Airtable Record.json +++ b/packages/server/marketplaces/tools/Create Airtable Record.json @@ -1,5 +1,6 @@ { "name": "add_airtable", + "framework": "Langchain", "description": "Add column1, column2 to Airtable", "color": "linear-gradient(rgb(125,71,222), rgb(128,102,23))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/airtable.svg", diff --git 
a/packages/server/marketplaces/tools/Get Current DateTime.json b/packages/server/marketplaces/tools/Get Current DateTime.json index b6860b30..b8279e33 100644 --- a/packages/server/marketplaces/tools/Get Current DateTime.json +++ b/packages/server/marketplaces/tools/Get Current DateTime.json @@ -1,5 +1,6 @@ { "name": "todays_date_time", + "framework": "Langchain", "description": "Useful to get todays day, date and time.", "color": "linear-gradient(rgb(117,118,129), rgb(230,10,250))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/javascript.svg", diff --git a/packages/server/marketplaces/tools/Get Stock Mover.json b/packages/server/marketplaces/tools/Get Stock Mover.json index 9108cc50..27d444b2 100644 --- a/packages/server/marketplaces/tools/Get Stock Mover.json +++ b/packages/server/marketplaces/tools/Get Stock Mover.json @@ -1,5 +1,6 @@ { "name": "get_stock_movers", + "framework": "Langchain", "description": "Get the stocks that has biggest price/volume moves, e.g. 
actives, gainers, losers, etc.", "iconSrc": "https://rapidapi.com/cdn/images?url=https://rapidapi-prod-apis.s3.amazonaws.com/9c/e743343bdd41edad39a3fdffd5b974/016c33699f51603ae6fe4420c439124b.png", "color": "linear-gradient(rgb(191,202,167), rgb(143,202,246))", diff --git a/packages/server/marketplaces/tools/Make Webhook.json b/packages/server/marketplaces/tools/Make Webhook.json index 24d00900..93e67a3f 100644 --- a/packages/server/marketplaces/tools/Make Webhook.json +++ b/packages/server/marketplaces/tools/Make Webhook.json @@ -1,5 +1,6 @@ { "name": "make_webhook", + "framework": "Langchain", "description": "Useful when you need to send message to Discord", "color": "linear-gradient(rgb(19,94,2), rgb(19,124,59))", "iconSrc": "https://github.com/FlowiseAI/Flowise/assets/26460777/517fdab2-8a6e-4781-b3c8-fb92cc78aa0b", diff --git a/packages/server/marketplaces/tools/Send Discord Message.json b/packages/server/marketplaces/tools/Send Discord Message.json index bbfaaa90..2d7adcac 100644 --- a/packages/server/marketplaces/tools/Send Discord Message.json +++ b/packages/server/marketplaces/tools/Send Discord Message.json @@ -1,5 +1,6 @@ { "name": "send_message_to_discord_channel", + "framework": "Langchain", "description": "Send message to Discord channel", "color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/discord-icon.svg", diff --git a/packages/server/marketplaces/tools/Send Slack Message.json b/packages/server/marketplaces/tools/Send Slack Message.json index f15d4050..5516b69a 100644 --- a/packages/server/marketplaces/tools/Send Slack Message.json +++ b/packages/server/marketplaces/tools/Send Slack Message.json @@ -1,5 +1,6 @@ { "name": "send_message_to_slack_channel", + "framework": "Langchain", "description": "Send message to Slack channel", "color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))", "iconSrc": 
"https://raw.githubusercontent.com/gilbarbara/logos/main/logos/slack-icon.svg", diff --git a/packages/server/marketplaces/tools/Send Teams Message.json b/packages/server/marketplaces/tools/Send Teams Message.json index 1af8111b..8ec32abd 100644 --- a/packages/server/marketplaces/tools/Send Teams Message.json +++ b/packages/server/marketplaces/tools/Send Teams Message.json @@ -1,5 +1,6 @@ { "name": "send_message_to_teams_channel", + "framework": "Langchain", "description": "Send message to Teams channel", "color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/microsoft-teams.svg", diff --git a/packages/server/marketplaces/tools/SendGrid Email.json b/packages/server/marketplaces/tools/SendGrid Email.json index 8a6bf993..b454f2c5 100644 --- a/packages/server/marketplaces/tools/SendGrid Email.json +++ b/packages/server/marketplaces/tools/SendGrid Email.json @@ -1,5 +1,6 @@ { "name": "sendgrid_email", + "framework": "Langchain", "description": "Send email using SendGrid", "color": "linear-gradient(rgb(230,108,70), rgb(222,4,98))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/sendgrid-icon.svg", diff --git a/packages/server/package.json b/packages/server/package.json index c7ed13ac..0d7dea77 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.12", + "version": "1.5.0", "description": "Flowiseai Server", "main": "dist/index", "types": "dist/index.d.ts", diff --git a/packages/server/src/commands/start.ts b/packages/server/src/commands/start.ts index 88713804..ba9f285e 100644 --- a/packages/server/src/commands/start.ts +++ b/packages/server/src/commands/start.ts @@ -18,6 +18,7 @@ export default class Start extends Command { static flags = { FLOWISE_USERNAME: Flags.string(), FLOWISE_PASSWORD: Flags.string(), + FLOWISE_FILE_SIZE_LIMIT: Flags.string(), PORT: Flags.string(), CORS_ORIGINS: 
Flags.string(), IFRAME_ORIGINS: Flags.string(), @@ -95,6 +96,9 @@ export default class Start extends Command { // Storage if (flags.BLOB_STORAGE_PATH) process.env.BLOB_STORAGE_PATH = flags.BLOB_STORAGE_PATH + //API Configuration + if (flags.FLOWISE_FILE_SIZE_LIMIT) process.env.FLOWISE_FILE_SIZE_LIMIT = flags.FLOWISE_FILE_SIZE_LIMIT + // Credentials if (flags.SECRETKEY_PATH) process.env.SECRETKEY_PATH = flags.SECRETKEY_PATH if (flags.FLOWISE_SECRETKEY_OVERWRITE) process.env.FLOWISE_SECRETKEY_OVERWRITE = flags.FLOWISE_SECRETKEY_OVERWRITE diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index be1e1cb8..851da8c8 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -133,8 +133,9 @@ export class App { async config(socketIO?: Server) { // Limit is needed to allow sending/receiving base64 encoded string - this.app.use(express.json({ limit: '50mb' })) - this.app.use(express.urlencoded({ limit: '50mb', extended: true })) + const flowise_file_size_limit = process.env.FLOWISE_FILE_SIZE_LIMIT ?? '50mb' + this.app.use(express.json({ limit: flowise_file_size_limit })) + this.app.use(express.urlencoded({ limit: flowise_file_size_limit, extended: true })) if (process.env.NUMBER_OF_PROXIES && parseInt(process.env.NUMBER_OF_PROXIES) > 0) this.app.set('trust proxy', parseInt(process.env.NUMBER_OF_PROXIES)) @@ -199,7 +200,7 @@ export class App { this.app.get('/api/v1/ip', (request, response) => { response.send({ ip: request.ip, - msg: 'See the returned IP address in the response. If it matches your current IP address ( which you can get by going to http://ip.nfriedly.com/ or https://api.ipify.org/ ), then the number of proxies is correct and the rate limiter should now work correctly. If not, increase the number of proxies by 1 until the IP address matches your own. Visit https://docs.flowiseai.com/deployment#rate-limit-setup-guide for more information.' + msg: 'Check returned IP address in the response. 
If it matches your current IP address ( which you can get by going to http://ip.nfriedly.com/ or https://api.ipify.org/ ), then the number of proxies is correct and the rate limiter should now work correctly. If not, increase the number of proxies by 1 and restart Cloud-Hosted Flowise until the IP address matches your own. Visit https://docs.flowiseai.com/configuration/rate-limit#cloud-hosted-rate-limit-setup-guide for more information.' }) }) @@ -509,7 +510,12 @@ export class App { const isEndingNode = endingNodeData?.outputs?.output === 'EndingNode' if (!isEndingNode) { - if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') { + if ( + endingNodeData && + endingNodeData.category !== 'Chains' && + endingNodeData.category !== 'Agents' && + endingNodeData.category !== 'Engine' + ) { return res.status(500).send(`Ending node must be either a Chain or Agent`) } } @@ -542,6 +548,7 @@ export class App { const chatId = req.query?.chatId as string | undefined const memoryType = req.query?.memoryType as string | undefined const sessionId = req.query?.sessionId as string | undefined + const messageId = req.query?.messageId as string | undefined const startDate = req.query?.startDate as string | undefined const endDate = req.query?.endDate as string | undefined let chatTypeFilter = req.query?.chatType as chatType | undefined @@ -569,7 +576,8 @@ export class App { memoryType, sessionId, startDate, - endDate + endDate, + messageId ) return res.json(chatmessages) }) @@ -1224,8 +1232,14 @@ export class App { this.app.get('/api/v1/fetch-links', async (req: Request, res: Response) => { const url = decodeURIComponent(req.query.url as string) const relativeLinksMethod = req.query.relativeLinksMethod as string + if (!relativeLinksMethod) { + return res.status(500).send('Please choose a Relative Links Method in Additional Parameters.') + } + + const limit = parseInt(req.query.limit as string) if (process.env.DEBUG === 'true') 
console.info(`Start ${relativeLinksMethod}`) - const links: string[] = relativeLinksMethod === 'webCrawl' ? await webCrawl(url, 0) : await xmlScrape(url, 0) + const links: string[] = relativeLinksMethod === 'webCrawl' ? await webCrawl(url, limit) : await xmlScrape(url, limit) + if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`) res.json({ status: 'OK', links }) }) @@ -1299,50 +1313,52 @@ export class App { // Marketplaces // ---------------------------------------- - // Get all chatflows for marketplaces - this.app.get('/api/v1/marketplaces/chatflows', async (req: Request, res: Response) => { - const marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'chatflows') - const jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') - const templates: any[] = [] + // Get all templates for marketplaces + this.app.get('/api/v1/marketplaces/templates', async (req: Request, res: Response) => { + let marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'chatflows') + let jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') + let templates: any[] = [] jsonsInDir.forEach((file, index) => { const filePath = path.join(__dirname, '..', 'marketplaces', 'chatflows', file) const fileData = fs.readFileSync(filePath) const fileDataObj = JSON.parse(fileData.toString()) const template = { id: index, - name: file.split('.json')[0], + templateName: file.split('.json')[0], flowData: fileData.toString(), badge: fileDataObj?.badge, + framework: fileDataObj?.framework, + categories: fileDataObj?.categories, + type: 'Chatflow', description: fileDataObj?.description || '' } templates.push(template) }) + + marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'tools') + jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') + jsonsInDir.forEach((file, index) => { + const filePath = path.join(__dirname, '..', 'marketplaces', 'tools', 
file) + const fileData = fs.readFileSync(filePath) + const fileDataObj = JSON.parse(fileData.toString()) + const template = { + ...fileDataObj, + id: index, + type: 'Tool', + framework: fileDataObj?.framework, + badge: fileDataObj?.badge, + categories: '', + templateName: file.split('.json')[0] + } + templates.push(template) + }) const FlowiseDocsQnA = templates.find((tmp) => tmp.name === 'Flowise Docs QnA') const FlowiseDocsQnAIndex = templates.findIndex((tmp) => tmp.name === 'Flowise Docs QnA') if (FlowiseDocsQnA && FlowiseDocsQnAIndex > 0) { templates.splice(FlowiseDocsQnAIndex, 1) templates.unshift(FlowiseDocsQnA) } - return res.json(templates) - }) - - // Get all tools for marketplaces - this.app.get('/api/v1/marketplaces/tools', async (req: Request, res: Response) => { - const marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'tools') - const jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') - const templates: any[] = [] - jsonsInDir.forEach((file, index) => { - const filePath = path.join(__dirname, '..', 'marketplaces', 'tools', file) - const fileData = fs.readFileSync(filePath) - const fileDataObj = JSON.parse(fileData.toString()) - const template = { - ...fileDataObj, - id: index, - templateName: file.split('.json')[0] - } - templates.push(template) - }) - return res.json(templates) + return res.json(templates.sort((a, b) => a.templateName.localeCompare(b.templateName))) }) // ---------------------------------------- @@ -1573,7 +1589,8 @@ export class App { memoryType?: string, sessionId?: string, startDate?: string, - endDate?: string + endDate?: string, + messageId?: string ): Promise { let fromDate if (startDate) fromDate = new Date(startDate) @@ -1587,8 +1604,9 @@ export class App { chatType, chatId, memoryType: memoryType ?? (chatId ? IsNull() : undefined), - sessionId: sessionId ?? (chatId ? IsNull() : undefined), - createdDate: toDate && fromDate ? 
Between(fromDate, toDate) : undefined + sessionId: sessionId ?? undefined, + createdDate: toDate && fromDate ? Between(fromDate, toDate) : undefined, + id: messageId ?? undefined }, order: { createdDate: sortOrder === 'DESC' ? 'DESC' : 'ASC' @@ -1888,7 +1906,12 @@ export class App { const isEndingNode = endingNodeData?.outputs?.output === 'EndingNode' if (!isEndingNode) { - if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') { + if ( + endingNodeData && + endingNodeData.category !== 'Chains' && + endingNodeData.category !== 'Agents' && + endingNodeData.category !== 'Engine' + ) { return res.status(500).send(`Ending node must be either a Chain or Agent`) } diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index 31a5a5f4..60faede5 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -821,7 +821,16 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component */ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => { const streamAvailableLLMs = { - 'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'], + 'Chat Models': [ + 'azureChatOpenAI', + 'chatOpenAI', + 'chatOpenAI_LlamaIndex', + 'chatAnthropic', + 'chatAnthropic_LlamaIndex', + 'chatOllama', + 'awsChatBedrock', + 'chatMistralAI' + ], LLMs: ['azureOpenAI', 'openAI', 'ollama'] } @@ -844,6 +853,9 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod // Agent that are available to stream const whitelistAgents = ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent'] isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name) + } else if (endingNodeData.category === 'Engine') { + const whitelistEngine = ['contextChatEngine', 'simpleChatEngine', 'queryEngine', 'subQuestionQueryEngine'] + 
isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name) } // If no output parser, flow is available to stream diff --git a/packages/ui/craco.config.js b/packages/ui/craco.config.js index 142305e0..093e5ece 100644 --- a/packages/ui/craco.config.js +++ b/packages/ui/craco.config.js @@ -10,7 +10,8 @@ module.exports = { } } ] - } + }, + ignoreWarnings: [/Failed to parse source map/] // Ignore warnings about source maps } } } diff --git a/packages/ui/package.json b/packages/ui/package.json index 68d78c95..eb3bebda 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "flowise-ui", - "version": "1.4.9", + "version": "1.5.0", "license": "SEE LICENSE IN LICENSE.md", "homepage": "https://flowiseai.com", "author": { diff --git a/packages/ui/src/api/marketplaces.js b/packages/ui/src/api/marketplaces.js index 3fd4ae87..bba914a7 100644 --- a/packages/ui/src/api/marketplaces.js +++ b/packages/ui/src/api/marketplaces.js @@ -2,8 +2,10 @@ import client from './client' const getAllChatflowsMarketplaces = () => client.get('/marketplaces/chatflows') const getAllToolsMarketplaces = () => client.get('/marketplaces/tools') +const getAllTemplatesFromMarketplaces = () => client.get('/marketplaces/templates') export default { getAllChatflowsMarketplaces, - getAllToolsMarketplaces + getAllToolsMarketplaces, + getAllTemplatesFromMarketplaces } diff --git a/packages/ui/src/api/scraper.js b/packages/ui/src/api/scraper.js index 382a9263..89333156 100644 --- a/packages/ui/src/api/scraper.js +++ b/packages/ui/src/api/scraper.js @@ -1,8 +1,8 @@ import client from './client' -const fetchAllLinks = (url, relativeLinksMethod) => - client.get(`/fetch-links?url=${encodeURIComponent(url)}&relativeLinksMethod=${relativeLinksMethod}`) +const fetchLinks = (url, relativeLinksMethod, relativeLinksLimit) => + client.get(`/fetch-links?url=${encodeURIComponent(url)}&relativeLinksMethod=${relativeLinksMethod}&limit=${relativeLinksLimit}`) export default { - 
fetchAllLinks + fetchLinks } diff --git a/packages/ui/src/assets/images/llamaindex.png b/packages/ui/src/assets/images/llamaindex.png new file mode 100644 index 00000000..139c33eb Binary files /dev/null and b/packages/ui/src/assets/images/llamaindex.png differ diff --git a/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js b/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js index a707d82e..a4199504 100644 --- a/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js +++ b/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js @@ -16,7 +16,7 @@ import { Stack, Typography } from '@mui/material' -import { IconTrash } from '@tabler/icons' +import { IconTrash, IconX } from '@tabler/icons' import PerfectScrollbar from 'react-perfect-scrollbar' import { BackdropLoader } from 'ui-component/loading/BackdropLoader' @@ -24,12 +24,23 @@ import { StyledButton } from 'ui-component/button/StyledButton' import scraperApi from 'api/scraper' -import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from 'store/actions' +import useNotifier from 'utils/useNotifier' + +import { + HIDE_CANVAS_DIALOG, + SHOW_CANVAS_DIALOG, + enqueueSnackbar as enqueueSnackbarAction, + closeSnackbar as closeSnackbarAction +} from 'store/actions' const ManageScrapedLinksDialog = ({ show, dialogProps, onCancel, onSave }) => { const portalElement = document.getElementById('portal') const dispatch = useDispatch() + useNotifier() + const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) + const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) + const [loading, setLoading] = useState(false) const [selectedLinks, setSelectedLinks] = useState([]) const [url, setUrl] = useState('') @@ -53,9 +64,38 @@ const ManageScrapedLinksDialog = ({ show, dialogProps, onCancel, onSave }) => { const handleFetchLinks = async () => { setLoading(true) - const fetchLinksResp = await scraperApi.fetchAllLinks(url, 'webCrawl') - if (fetchLinksResp.data) { - 
setSelectedLinks(fetchLinksResp.data.links) + try { + const fetchLinksResp = await scraperApi.fetchLinks(url, dialogProps.relativeLinksMethod, dialogProps.limit) + if (fetchLinksResp.data) { + setSelectedLinks(fetchLinksResp.data.links) + enqueueSnackbar({ + message: 'Successfully fetched links', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + } + } catch (error) { + const errorData = error.response.data || `${error.response.status}: ${error.response.statusText}` + enqueueSnackbar({ + message: errorData, + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) } setLoading(false) } diff --git a/packages/ui/src/ui-component/dialog/NodeInfoDialog.js b/packages/ui/src/ui-component/dialog/NodeInfoDialog.js index 6f3bec5d..a5dbd411 100644 --- a/packages/ui/src/ui-component/dialog/NodeInfoDialog.js +++ b/packages/ui/src/ui-component/dialog/NodeInfoDialog.js @@ -132,6 +132,35 @@ const NodeInfoDialog = ({ show, dialogProps, onCancel }) => { )} + {dialogProps.data.tags && + dialogProps.data.tags.length > 0 && + dialogProps.data.tags.map((tag, index) => ( +
+ + {tag.toLowerCase()} + +
+ ))} diff --git a/packages/ui/src/ui-component/table/MarketplaceTable.js b/packages/ui/src/ui-component/table/MarketplaceTable.js new file mode 100644 index 00000000..3b66409b --- /dev/null +++ b/packages/ui/src/ui-component/table/MarketplaceTable.js @@ -0,0 +1,146 @@ +import PropTypes from 'prop-types' +import { styled } from '@mui/material/styles' +import Table from '@mui/material/Table' +import TableBody from '@mui/material/TableBody' +import TableCell, { tableCellClasses } from '@mui/material/TableCell' +import TableContainer from '@mui/material/TableContainer' +import TableHead from '@mui/material/TableHead' +import TableRow from '@mui/material/TableRow' +import Paper from '@mui/material/Paper' +import Chip from '@mui/material/Chip' +import { Button, Typography } from '@mui/material' + +const StyledTableCell = styled(TableCell)(({ theme }) => ({ + [`&.${tableCellClasses.head}`]: { + backgroundColor: theme.palette.common.black, + color: theme.palette.common.white + }, + [`&.${tableCellClasses.body}`]: { + fontSize: 14 + } +})) + +const StyledTableRow = styled(TableRow)(({ theme }) => ({ + '&:nth-of-type(odd)': { + backgroundColor: theme.palette.action.hover + }, + // hide last border + '&:last-child td, &:last-child th': { + border: 0 + } +})) + +export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterByType, filterByFramework, goToCanvas, goToTool }) => { + const openTemplate = (selectedTemplate) => { + if (selectedTemplate.flowData) { + goToCanvas(selectedTemplate) + } else { + goToTool(selectedTemplate) + } + } + + return ( + <> + + + + + + Name + + + Type + + + Description + + + Nodes + + +   + + + + + {data + .filter(filterByBadge) + .filter(filterByType) + .filter(filterFunction) + .filter(filterByFramework) + .map((row, index) => ( + + + + + + + + {row.type} + + + + {row.description || ''} + + + +
+ {row.categories && + row.categories + .split(',') + .map((tag, index) => ( + + ))} +
+
+ + + {row.badge && + row.badge + .split(';') + .map((tag, index) => ( + + ))} + + +
+ ))} +
+
+
+ + ) +} + +MarketplaceTable.propTypes = { + data: PropTypes.array, + filterFunction: PropTypes.func, + filterByBadge: PropTypes.func, + filterByType: PropTypes.func, + filterByFramework: PropTypes.func, + goToTool: PropTypes.func, + goToCanvas: PropTypes.func +} diff --git a/packages/ui/src/utils/genericHelper.js b/packages/ui/src/utils/genericHelper.js index 57ba8892..74dc9578 100644 --- a/packages/ui/src/utils/genericHelper.js +++ b/packages/ui/src/utils/genericHelper.js @@ -99,6 +99,7 @@ export const initNode = (nodeData, newNodeId) => { id: `${newNodeId}-output-${nodeData.outputs[j].name}-${baseClasses}`, name: nodeData.outputs[j].name, label: nodeData.outputs[j].label, + description: nodeData.outputs[j].description ?? '', type } options.push(newOutputOption) @@ -107,6 +108,7 @@ export const initNode = (nodeData, newNodeId) => { name: 'output', label: 'Output', type: 'options', + description: nodeData.outputs[0].description ?? '', options, default: nodeData.outputs[0].name } @@ -116,6 +118,7 @@ export const initNode = (nodeData, newNodeId) => { id: `${newNodeId}-output-${nodeData.name}-${nodeData.baseClasses.join('|')}`, name: nodeData.name, label: nodeData.type, + description: nodeData.description ?? 
'', type: nodeData.baseClasses.join(' | ') } outputAnchors.push(newOutput) @@ -277,6 +280,7 @@ export const generateExportFlowData = (flowData) => { name: node.data.name, type: node.data.type, baseClasses: node.data.baseClasses, + tags: node.data.tags, category: node.data.category, description: node.data.description, inputParams: node.data.inputParams, diff --git a/packages/ui/src/views/canvas/AddNodes.js b/packages/ui/src/views/canvas/AddNodes.js index 7bf3e7ff..61db1716 100644 --- a/packages/ui/src/views/canvas/AddNodes.js +++ b/packages/ui/src/views/canvas/AddNodes.js @@ -22,7 +22,9 @@ import { Popper, Stack, Typography, - Chip + Chip, + Tab, + Tabs } from '@mui/material' import ExpandMoreIcon from '@mui/icons-material/ExpandMore' @@ -36,12 +38,20 @@ import { StyledFab } from 'ui-component/button/StyledFab' // icons import { IconPlus, IconSearch, IconMinus, IconX } from '@tabler/icons' +import LlamaindexPNG from 'assets/images/llamaindex.png' +import LangChainPNG from 'assets/images/langchain.png' // const import { baseURL } from 'store/constant' import { SET_COMPONENT_NODES } from 'store/actions' // ==============================|| ADD NODES||============================== // +function a11yProps(index) { + return { + id: `attachment-tab-${index}`, + 'aria-controls': `attachment-tabpanel-${index}` + } +} const AddNodes = ({ nodesData, node }) => { const theme = useTheme() @@ -52,6 +62,7 @@ const AddNodes = ({ nodesData, node }) => { const [nodes, setNodes] = useState({}) const [open, setOpen] = useState(false) const [categoryExpanded, setCategoryExpanded] = useState({}) + const [tabValue, setTabValue] = useState(0) const anchorRef = useRef(null) const prevOpen = useRef(open) @@ -86,6 +97,11 @@ const AddNodes = ({ nodesData, node }) => { } } + const handleTabChange = (event, newValue) => { + setTabValue(newValue) + filterSearch(searchValue, newValue) + } + const getSearchedNodes = (value) => { const passed = nodesData.filter((nd) => { const passesQuery = 
nd.name.toLowerCase().includes(value.toLowerCase()) @@ -95,23 +111,34 @@ const AddNodes = ({ nodesData, node }) => { return passed } - const filterSearch = (value) => { + const filterSearch = (value, newTabValue) => { setSearchValue(value) setTimeout(() => { if (value) { const returnData = getSearchedNodes(value) - groupByCategory(returnData, true) + groupByCategory(returnData, newTabValue ?? tabValue, true) scrollTop() } else if (value === '') { - groupByCategory(nodesData) + groupByCategory(nodesData, newTabValue ?? tabValue) scrollTop() } }, 500) } - const groupByCategory = (nodes, isFilter) => { + const groupByTags = (nodes, newTabValue = 0) => { + const langchainNodes = nodes.filter((nd) => !nd.tags) + const llmaindexNodes = nodes.filter((nd) => nd.tags && nd.tags.includes('LlamaIndex')) + if (newTabValue === 0) { + return langchainNodes + } else { + return llmaindexNodes + } + } + + const groupByCategory = (nodes, newTabValue, isFilter) => { + const taggedNodes = groupByTags(nodes, newTabValue) const accordianCategories = {} - const result = nodes.reduce(function (r, a) { + const result = taggedNodes.reduce(function (r, a) { r[a.category] = r[a.category] || [] r[a.category].push(a) accordianCategories[a.category] = isFilter ? true : false @@ -244,15 +271,72 @@ const AddNodes = ({ nodesData, node }) => { 'aria-label': 'weight' }} /> + + {['LangChain', 'LlamaIndex'].map((item, index) => ( + + {item} + + } + iconPosition='start' + sx={{ minHeight: '50px', height: '50px' }} + key={index} + label={item} + {...a11yProps(index)} + > + ))} +
+ BETA +
+
+ { ps.current = el }} - style={{ height: '100%', maxHeight: 'calc(100vh - 320px)', overflowX: 'hidden' }} + style={{ height: '100%', maxHeight: 'calc(100vh - 380px)', overflowX: 'hidden' }} > - + { {data.label} +
+ {data.tags && data.tags.includes('LlamaIndex') && ( + <> +
+ LlamaIndex +
+ + )} {warningMessage && ( <> -
{warningMessage}} placement='top'> diff --git a/packages/ui/src/views/canvas/NodeInputHandler.js b/packages/ui/src/views/canvas/NodeInputHandler.js index bc877c9f..560fb34e 100644 --- a/packages/ui/src/views/canvas/NodeInputHandler.js +++ b/packages/ui/src/views/canvas/NodeInputHandler.js @@ -91,9 +91,11 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA } } - const onManageLinksDialogClicked = (url, selectedLinks) => { + const onManageLinksDialogClicked = (url, selectedLinks, relativeLinksMethod, limit) => { const dialogProps = { url, + relativeLinksMethod, + limit, selectedLinks, confirmButtonName: 'Save', cancelButtonName: 'Cancel' @@ -475,7 +477,9 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA onClick={() => onManageLinksDialogClicked( data.inputs[inputParam.name] ?? inputParam.default ?? '', - data.inputs.selectedLinks + data.inputs.selectedLinks, + data.inputs['relativeLinksMethod'] ?? 'webCrawl', + parseInt(data.inputs['limit']) || 0 ) } > diff --git a/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js b/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js index 8ec5ada3..44cb75e8 100644 --- a/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js +++ b/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js @@ -13,6 +13,7 @@ import AdditionalParamsDialog from 'ui-component/dialog/AdditionalParamsDialog' // const import { baseURL } from 'store/constant' +import LlamaindexPNG from 'assets/images/llamaindex.png' const CardWrapper = styled(MainCard)(({ theme }) => ({ background: theme.palette.card.main, @@ -87,6 +88,23 @@ const MarketplaceCanvasNode = ({ data }) => { {data.label}
+
+ {data.tags && data.tags.includes('LlamaIndex') && ( + <> +
+ LlamaIndex +
+ + )} {(data.inputAnchors.length > 0 || data.inputParams.length > 0) && ( <> diff --git a/packages/ui/src/views/marketplaces/index.js b/packages/ui/src/views/marketplaces/index.js index 665341c4..e5a65cb9 100644 --- a/packages/ui/src/views/marketplaces/index.js +++ b/packages/ui/src/views/marketplaces/index.js @@ -4,9 +4,26 @@ import { useSelector } from 'react-redux' import PropTypes from 'prop-types' // material-ui -import { Grid, Box, Stack, Tabs, Tab, Badge } from '@mui/material' +import { + Grid, + Box, + Stack, + Badge, + Toolbar, + TextField, + InputAdornment, + ButtonGroup, + ToggleButton, + InputLabel, + FormControl, + Select, + OutlinedInput, + Checkbox, + ListItemText, + Button +} from '@mui/material' import { useTheme } from '@mui/material/styles' -import { IconHierarchy, IconTool } from '@tabler/icons' +import { IconChevronsDown, IconChevronsUp, IconLayoutGrid, IconList, IconSearch } from '@tabler/icons' // project imports import MainCard from 'ui-component/cards/MainCard' @@ -23,6 +40,10 @@ import useApi from 'hooks/useApi' // const import { baseURL } from 'store/constant' +import * as React from 'react' +import ToggleButtonGroup from '@mui/material/ToggleButtonGroup' +import { MarketplaceTable } from '../../ui-component/table/MarketplaceTable' +import MenuItem from '@mui/material/MenuItem' function TabPanel(props) { const { children, value, index, ...other } = props @@ -45,6 +66,19 @@ TabPanel.propTypes = { value: PropTypes.number.isRequired } +const ITEM_HEIGHT = 48 +const ITEM_PADDING_TOP = 8 +const badges = ['POPULAR', 'NEW'] +const types = ['Chatflow', 'Tool'] +const framework = ['Langchain', 'LlamaIndex'] +const MenuProps = { + PaperProps: { + style: { + maxHeight: ITEM_HEIGHT * 4.5 + ITEM_PADDING_TOP, + width: 250 + } + } +} // ==============================|| Marketplace ||============================== // const Marketplace = () => { @@ -53,16 +87,77 @@ const Marketplace = () => { const theme = useTheme() const customization = 
useSelector((state) => state.customization) - const [isChatflowsLoading, setChatflowsLoading] = useState(true) - const [isToolsLoading, setToolsLoading] = useState(true) + const [isLoading, setLoading] = useState(true) const [images, setImages] = useState({}) - const tabItems = ['Chatflows', 'Tools'] - const [value, setValue] = useState(0) + const [showToolDialog, setShowToolDialog] = useState(false) const [toolDialogProps, setToolDialogProps] = useState({}) - const getAllChatflowsMarketplacesApi = useApi(marketplacesApi.getAllChatflowsMarketplaces) - const getAllToolsMarketplacesApi = useApi(marketplacesApi.getAllToolsMarketplaces) + const getAllTemplatesMarketplacesApi = useApi(marketplacesApi.getAllTemplatesFromMarketplaces) + + const [view, setView] = React.useState(localStorage.getItem('mpDisplayStyle') || 'card') + const [search, setSearch] = useState('') + + const [badgeFilter, setBadgeFilter] = useState([]) + const [typeFilter, setTypeFilter] = useState([]) + const [frameworkFilter, setFrameworkFilter] = useState([]) + const [open, setOpen] = useState(false) + const handleBadgeFilterChange = (event) => { + const { + target: { value } + } = event + setBadgeFilter( + // On autofill we get a stringified value. + typeof value === 'string' ? value.split(',') : value + ) + } + const handleTypeFilterChange = (event) => { + const { + target: { value } + } = event + setTypeFilter( + // On autofill we get a stringified value. + typeof value === 'string' ? value.split(',') : value + ) + } + const handleFrameworkFilterChange = (event) => { + const { + target: { value } + } = event + setFrameworkFilter( + // On autofill we get a stringified value. + typeof value === 'string' ? 
value.split(',') : value + ) + } + + const handleViewChange = (event, nextView) => { + localStorage.setItem('mpDisplayStyle', nextView) + setView(nextView) + } + + const onSearchChange = (event) => { + setSearch(event.target.value) + } + + function filterFlows(data) { + return ( + data.categories?.toLowerCase().indexOf(search.toLowerCase()) > -1 || + data.templateName.toLowerCase().indexOf(search.toLowerCase()) > -1 || + (data.description && data.description.toLowerCase().indexOf(search.toLowerCase()) > -1) + ) + } + + function filterByBadge(data) { + return badgeFilter.length > 0 ? badgeFilter.includes(data.badge) : true + } + + function filterByType(data) { + return typeFilter.length > 0 ? typeFilter.includes(data.type) : true + } + + function filterByFramework(data) { + return frameworkFilter.length > 0 ? frameworkFilter.includes(data.framework) : true + } const onUseTemplate = (selectedTool) => { const dialogProp = { @@ -90,39 +185,33 @@ const Marketplace = () => { navigate(`/marketplace/${selectedChatflow.id}`, { state: selectedChatflow }) } - const handleChange = (event, newValue) => { - setValue(newValue) - } - useEffect(() => { - getAllChatflowsMarketplacesApi.request() - getAllToolsMarketplacesApi.request() + getAllTemplatesMarketplacesApi.request() // eslint-disable-next-line react-hooks/exhaustive-deps }, []) useEffect(() => { - setChatflowsLoading(getAllChatflowsMarketplacesApi.loading) - }, [getAllChatflowsMarketplacesApi.loading]) + setLoading(getAllTemplatesMarketplacesApi.loading) + }, [getAllTemplatesMarketplacesApi.loading]) useEffect(() => { - setToolsLoading(getAllToolsMarketplacesApi.loading) - }, [getAllToolsMarketplacesApi.loading]) - - useEffect(() => { - if (getAllChatflowsMarketplacesApi.data) { + if (getAllTemplatesMarketplacesApi.data) { try { - const chatflows = getAllChatflowsMarketplacesApi.data + const flows = getAllTemplatesMarketplacesApi.data + const images = {} - for (let i = 0; i < chatflows.length; i += 1) { - const flowDataStr 
= chatflows[i].flowData - const flowData = JSON.parse(flowDataStr) - const nodes = flowData.nodes || [] - images[chatflows[i].id] = [] - for (let j = 0; j < nodes.length; j += 1) { - const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` - if (!images[chatflows[i].id].includes(imageSrc)) { - images[chatflows[i].id].push(imageSrc) + for (let i = 0; i < flows.length; i += 1) { + if (flows[i].flowData) { + const flowDataStr = flows[i].flowData + const flowData = JSON.parse(flowDataStr) + const nodes = flowData.nodes || [] + images[flows[i].id] = [] + for (let j = 0; j < nodes.length; j += 1) { + const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` + if (!images[flows[i].id].includes(imageSrc)) { + images[flows[i].id].push(imageSrc) + } } } } @@ -131,80 +220,215 @@ const Marketplace = () => { console.error(e) } } - }, [getAllChatflowsMarketplacesApi.data]) + }, [getAllTemplatesMarketplacesApi.data]) return ( <> - -

Marketplace

-
- - {tabItems.map((item, index) => ( - : } - iconPosition='start' - label={{item}} + + +

Marketplace

+ + + + ) + }} /> - ))} -
- {tabItems.map((item, index) => ( - - {item === 'Chatflows' && ( - - {!isChatflowsLoading && - getAllChatflowsMarketplacesApi.data && - getAllChatflowsMarketplacesApi.data.map((data, index) => ( - - {data.badge && ( - + + + + + + + + + + + + + + + + + {open && ( + + + + + Tag + + + + + + Type + + + + + + Framework + + + + + + )} + + {!isLoading && (!view || view === 'card') && getAllTemplatesMarketplacesApi.data && ( + <> + + {getAllTemplatesMarketplacesApi.data + .filter(filterByBadge) + .filter(filterByType) + .filter(filterFlows) + .filter(filterByFramework) + .map((data, index) => ( + + {data.badge && ( + + {data.type === 'Chatflow' && ( goToCanvas(data)} data={data} images={images[data.id]} /> - - )} - {!data.badge && ( - goToCanvas(data)} data={data} images={images[data.id]} /> - )} - - ))} - - )} - {item === 'Tools' && ( - - {!isToolsLoading && - getAllToolsMarketplacesApi.data && - getAllToolsMarketplacesApi.data.map((data, index) => ( - - {data.badge && ( - - goToTool(data)} /> - - )} - {!data.badge && goToTool(data)} />} - - ))} - - )} - - ))} - {((!isChatflowsLoading && (!getAllChatflowsMarketplacesApi.data || getAllChatflowsMarketplacesApi.data.length === 0)) || - (!isToolsLoading && (!getAllToolsMarketplacesApi.data || getAllToolsMarketplacesApi.data.length === 0))) && ( + )} + {data.type === 'Tool' && goToTool(data)} />} + + )} + {!data.badge && data.type === 'Chatflow' && ( + goToCanvas(data)} data={data} images={images[data.id]} /> + )} + {!data.badge && data.type === 'Tool' && goToTool(data)} />} + + ))} + + + )} + {!isLoading && view === 'list' && getAllTemplatesMarketplacesApi.data && ( + + )} + + {!isLoading && (!getAllTemplatesMarketplacesApi.data || getAllTemplatesMarketplacesApi.data.length === 0) && (