Merge branch 'main' into FEATURE/Vision

feature/Vision
Henry Heng 2024-02-14 18:06:33 +08:00 committed by GitHub
commit dcb1ad15e7
GPG Key ID: B5690EEEBB952194
130 changed files with 6581 additions and 284 deletions


@ -123,6 +123,7 @@ Flowise supports different environment variables to configure your instance. You can specify them in the `package
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload file size limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| BLOB_STORAGE_PATH | Storage location | String | `your-home-dir/.flowise/storage` |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |


@ -127,6 +127,7 @@ Flowise supports different environment variables to configure your instance. You
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload file size limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| BLOB_STORAGE_PATH | Location where uploaded files are stored | String | `your-home-dir/.flowise/storage` |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
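As a quick illustration of how these variables are consumed (a sketch, not Flowise's actual startup code; only the variable names come from the table above):

const port = parseInt(process.env.PORT ?? '3000', 10)
const fileSizeLimit = process.env.FLOWISE_FILE_SIZE_LIMIT ?? '50mb' // upload cap, e.g. for body-size middleware
const debug = process.env.DEBUG === 'true' // DEBUG toggles component logging
console.log(`Flowise listening on port ${port}, upload limit ${fileSizeLimit}, debug=${debug}`)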


@ -22,6 +22,7 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# FLOWISE_USERNAME=user
# FLOWISE_PASSWORD=1234
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
# FLOWISE_FILE_SIZE_LIMIT=50mb
# DEBUG=true
# LOG_LEVEL=debug (error | warn | info | verbose | debug)
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs


@ -10,6 +10,7 @@ services:
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
- DEBUG=${DEBUG}
- DATABASE_PATH=${DATABASE_PATH}
- DATABASE_TYPE=${DATABASE_TYPE}


@ -1,6 +1,6 @@
{
"name": "flowise",
"version": "1.4.12",
"version": "1.5.0",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [


@ -0,0 +1,135 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
interface AzureOpenAIConfig {
apiKey?: string
endpoint?: string
apiVersion?: string
deploymentName?: string
}
class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
tags: string[]
credential: INodeParams
inputs: INodeParams[]
constructor() {
this.label = 'AzureChatOpenAI'
this.name = 'azureChatOpenAI_LlamaIndex'
this.version = 1.0
this.type = 'AzureChatOpenAI'
this.icon = 'Azure.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around Azure OpenAI Chat LLM, specific to LlamaIndex'
this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)]
this.tags = ['LlamaIndex']
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['azureOpenAIApi']
}
this.inputs = [
{
label: 'Model Name',
name: 'modelName',
type: 'options',
options: [
{
label: 'gpt-4',
name: 'gpt-4'
},
{
label: 'gpt-4-32k',
name: 'gpt-4-32k'
},
{
label: 'gpt-3.5-turbo',
name: 'gpt-3.5-turbo'
},
{
label: 'gpt-3.5-turbo-16k',
name: 'gpt-3.5-turbo-16k'
}
],
default: 'gpt-3.5-turbo-16k',
optional: true
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
default: 0.9,
optional: true
},
{
label: 'Max Tokens',
name: 'maxTokens',
type: 'number',
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Top Probability',
name: 'topP',
type: 'number',
step: 0.1,
optional: true,
additionalParams: true
},
{
label: 'Timeout',
name: 'timeout',
type: 'number',
step: 1,
optional: true,
additionalParams: true
}
]
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
const temperature = nodeData.inputs?.temperature as string
const maxTokens = nodeData.inputs?.maxTokens as string
const topP = nodeData.inputs?.topP as string
const timeout = nodeData.inputs?.timeout as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData)
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
const obj: Partial<OpenAI> & { azure?: AzureOpenAIConfig } = {
temperature: parseFloat(temperature),
model: modelName,
azure: {
apiKey: azureOpenAIApiKey,
endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`,
apiVersion: azureOpenAIApiVersion,
deploymentName: azureOpenAIApiDeploymentName
}
}
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (timeout) obj.timeout = parseInt(timeout, 10)
const model = new OpenAI(obj)
return model
}
}
module.exports = { nodeClass: AzureChatOpenAI_LlamaIndex_ChatModels }
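For orientation, a hedged sketch of how such a node is exercised: Flowise requires the compiled file, instantiates nodeClass, and calls init() with the user's input values. The nodeData below is simplified and the credential id is hypothetical; only the input names come from the definition above.

async function demo() {
    const { nodeClass } = require('./AzureChatOpenAI_LlamaIndex') // path assumed
    const node = new nodeClass()
    // Inputs arrive as strings and are parsed inside init(); options normally
    // carries the server's data source for credential lookup.
    const model = await node.init(
        { inputs: { modelName: 'gpt-4', temperature: '0.5' }, credential: 'cred-id' } as any,
        '',
        {} as any
    )
    return model // a LlamaIndex OpenAI instance pointed at Azure
}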


@ -0,0 +1,104 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Anthropic } from 'llamaindex'
class ChatAnthropic_LlamaIndex_ChatModels implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
tags: string[]
baseClasses: string[]
credential: INodeParams
inputs: INodeParams[]
constructor() {
this.label = 'ChatAnthropic'
this.name = 'chatAnthropic_LlamaIndex'
this.version = 1.0
this.type = 'ChatAnthropic'
this.icon = 'Anthropic.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around ChatAnthropic LLM, specific to LlamaIndex'
this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(Anthropic)]
this.tags = ['LlamaIndex']
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['anthropicApi']
}
this.inputs = [
{
label: 'Model Name',
name: 'modelName',
type: 'options',
options: [
{
label: 'claude-2',
name: 'claude-2',
description: 'Claude 2 latest major version; automatically gets updates to the model as they are released'
},
{
label: 'claude-instant-1',
name: 'claude-instant-1',
description: 'Claude Instant latest major version; automatically gets updates to the model as they are released'
}
],
default: 'claude-2',
optional: true
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
default: 0.9,
optional: true
},
{
label: 'Max Tokens',
name: 'maxTokensToSample',
type: 'number',
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Top P',
name: 'topP',
type: 'number',
step: 0.1,
optional: true,
additionalParams: true
}
]
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as 'claude-2' | 'claude-instant-1' | undefined
const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
const topP = nodeData.inputs?.topP as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)
const obj: Partial<Anthropic> = {
temperature: parseFloat(temperature),
model: modelName,
apiKey: anthropicApiKey
}
if (maxTokensToSample) obj.maxTokens = parseInt(maxTokensToSample, 10)
if (topP) obj.topP = parseFloat(topP)
const model = new Anthropic(obj)
return model
}
}
module.exports = { nodeClass: ChatAnthropic_LlamaIndex_ChatModels }


@ -0,0 +1,156 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
class ChatOpenAI_LlamaIndex_LLMs implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
tags: string[]
credential: INodeParams
inputs: INodeParams[]
constructor() {
this.label = 'ChatOpenAI'
this.name = 'chatOpenAI_LlamaIndex'
this.version = 1.0
this.type = 'ChatOpenAI'
this.icon = 'openai.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around OpenAI Chat LLM, specific to LlamaIndex'
this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)]
this.tags = ['LlamaIndex']
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['openAIApi']
}
this.inputs = [
{
label: 'Model Name',
name: 'modelName',
type: 'options',
options: [
{
label: 'gpt-4',
name: 'gpt-4'
},
{
label: 'gpt-4-turbo-preview',
name: 'gpt-4-turbo-preview'
},
{
label: 'gpt-4-0125-preview',
name: 'gpt-4-0125-preview'
},
{
label: 'gpt-4-1106-preview',
name: 'gpt-4-1106-preview'
},
{
label: 'gpt-4-vision-preview',
name: 'gpt-4-vision-preview'
},
{
label: 'gpt-4-0613',
name: 'gpt-4-0613'
},
{
label: 'gpt-4-32k',
name: 'gpt-4-32k'
},
{
label: 'gpt-4-32k-0613',
name: 'gpt-4-32k-0613'
},
{
label: 'gpt-3.5-turbo',
name: 'gpt-3.5-turbo'
},
{
label: 'gpt-3.5-turbo-1106',
name: 'gpt-3.5-turbo-1106'
},
{
label: 'gpt-3.5-turbo-0613',
name: 'gpt-3.5-turbo-0613'
},
{
label: 'gpt-3.5-turbo-16k',
name: 'gpt-3.5-turbo-16k'
},
{
label: 'gpt-3.5-turbo-16k-0613',
name: 'gpt-3.5-turbo-16k-0613'
}
],
default: 'gpt-3.5-turbo',
optional: true
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
default: 0.9,
optional: true
},
{
label: 'Max Tokens',
name: 'maxTokens',
type: 'number',
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Top Probability',
name: 'topP',
type: 'number',
step: 0.1,
optional: true,
additionalParams: true
},
{
label: 'Timeout',
name: 'timeout',
type: 'number',
step: 1,
optional: true,
additionalParams: true
}
]
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
const maxTokens = nodeData.inputs?.maxTokens as string
const topP = nodeData.inputs?.topP as string
const timeout = nodeData.inputs?.timeout as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const obj: Partial<OpenAI> = {
temperature: parseFloat(temperature),
model: modelName,
apiKey: openAIApiKey
}
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (timeout) obj.timeout = parseInt(timeout, 10)
const model = new OpenAI(obj)
return model
}
}
module.exports = { nodeClass: ChatOpenAI_LlamaIndex_LLMs }


@ -126,7 +126,9 @@ class Cheerio_DocumentLoaders implements INode {
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
// if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined
// so when limit is 0 we can fetch all the links
if (limit === null || limit === undefined) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
selectedLinks && selectedLinks.length > 0
@ -143,7 +145,7 @@ class Cheerio_DocumentLoaders implements INode {
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
for (const page of selectedLinks.slice(0, limit)) {
docs.push(...(await cheerioLoader(page)))
}
} else {
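The guard above is subtle: the old `if (!limit) limit = 10` treated a deliberate limit of 0 as missing, because 0 is falsy in JavaScript. A self-contained illustration of the difference:

function resolveLimit(limit?: number | null): number {
    // Old behaviour: if (!limit) limit = 10, which silently turns 0 into 10.
    // New behaviour: only null/undefined fall back to the default.
    if (limit === null || limit === undefined) return 10
    if (limit < 0) throw new Error('Limit cannot be less than 0')
    return limit
}
console.log(resolveLimit(undefined)) // 10 (default)
console.log(resolveLimit(0)) // 0, i.e. crawl all links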


@ -34,6 +34,12 @@ class Folder_DocumentLoaders implements INode {
type: 'string',
placeholder: ''
},
{
label: 'Recursive',
name: 'recursive',
type: 'boolean',
additionalParams: false
},
{
label: 'Text Splitter',
name: 'textSplitter',
@ -54,48 +60,54 @@ class Folder_DocumentLoaders implements INode {
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
const folderPath = nodeData.inputs?.folderPath as string
const metadata = nodeData.inputs?.metadata
const recursive = nodeData.inputs?.recursive as boolean
const loader = new DirectoryLoader(folderPath, {
'.json': (path) => new JSONLoader(path),
'.txt': (path) => new TextLoader(path),
'.csv': (path) => new CSVLoader(path),
'.docx': (path) => new DocxLoader(path),
// @ts-ignore
'.pdf': (path) => new PDFLoader(path, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }),
'.aspx': (path) => new TextLoader(path),
'.asp': (path) => new TextLoader(path),
'.cpp': (path) => new TextLoader(path), // C++
'.c': (path) => new TextLoader(path),
'.cs': (path) => new TextLoader(path),
'.css': (path) => new TextLoader(path),
'.go': (path) => new TextLoader(path), // Go
'.h': (path) => new TextLoader(path), // C++ Header files
'.java': (path) => new TextLoader(path), // Java
'.js': (path) => new TextLoader(path), // JavaScript
'.less': (path) => new TextLoader(path), // Less files
'.ts': (path) => new TextLoader(path), // TypeScript
'.php': (path) => new TextLoader(path), // PHP
'.proto': (path) => new TextLoader(path), // Protocol Buffers
'.python': (path) => new TextLoader(path), // Python
'.py': (path) => new TextLoader(path), // Python
'.rst': (path) => new TextLoader(path), // reStructuredText
'.ruby': (path) => new TextLoader(path), // Ruby
'.rb': (path) => new TextLoader(path), // Ruby
'.rs': (path) => new TextLoader(path), // Rust
'.scala': (path) => new TextLoader(path), // Scala
'.sc': (path) => new TextLoader(path), // Scala
'.scss': (path) => new TextLoader(path), // Sass
'.sol': (path) => new TextLoader(path), // Solidity
'.sql': (path) => new TextLoader(path), //SQL
'.swift': (path) => new TextLoader(path), // Swift
'.markdown': (path) => new TextLoader(path), // Markdown
'.md': (path) => new TextLoader(path), // Markdown
'.tex': (path) => new TextLoader(path), // LaTeX
'.ltx': (path) => new TextLoader(path), // LaTeX
'.html': (path) => new TextLoader(path), // HTML
'.vb': (path) => new TextLoader(path), // Visual Basic
'.xml': (path) => new TextLoader(path) // XML
})
const loader = new DirectoryLoader(
folderPath,
{
'.json': (path) => new JSONLoader(path),
'.txt': (path) => new TextLoader(path),
'.csv': (path) => new CSVLoader(path),
'.docx': (path) => new DocxLoader(path),
// @ts-ignore
'.pdf': (path) => new PDFLoader(path, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }),
'.aspx': (path) => new TextLoader(path),
'.asp': (path) => new TextLoader(path),
'.cpp': (path) => new TextLoader(path), // C++
'.c': (path) => new TextLoader(path),
'.cs': (path) => new TextLoader(path),
'.css': (path) => new TextLoader(path),
'.go': (path) => new TextLoader(path), // Go
'.h': (path) => new TextLoader(path), // C++ Header files
'.kt': (path) => new TextLoader(path), // Kotlin
'.java': (path) => new TextLoader(path), // Java
'.js': (path) => new TextLoader(path), // JavaScript
'.less': (path) => new TextLoader(path), // Less files
'.ts': (path) => new TextLoader(path), // TypeScript
'.php': (path) => new TextLoader(path), // PHP
'.proto': (path) => new TextLoader(path), // Protocol Buffers
'.python': (path) => new TextLoader(path), // Python
'.py': (path) => new TextLoader(path), // Python
'.rst': (path) => new TextLoader(path), // reStructuredText
'.ruby': (path) => new TextLoader(path), // Ruby
'.rb': (path) => new TextLoader(path), // Ruby
'.rs': (path) => new TextLoader(path), // Rust
'.scala': (path) => new TextLoader(path), // Scala
'.sc': (path) => new TextLoader(path), // Scala
'.scss': (path) => new TextLoader(path), // Sass
'.sol': (path) => new TextLoader(path), // Solidity
'.sql': (path) => new TextLoader(path), //SQL
'.swift': (path) => new TextLoader(path), // Swift
'.markdown': (path) => new TextLoader(path), // Markdown
'.md': (path) => new TextLoader(path), // Markdown
'.tex': (path) => new TextLoader(path), // LaTeX
'.ltx': (path) => new TextLoader(path), // LaTeX
'.html': (path) => new TextLoader(path), // HTML
'.vb': (path) => new TextLoader(path), // Visual Basic
'.xml': (path) => new TextLoader(path) // XML
},
recursive
)
let docs = []
if (textSplitter) {
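The restructuring above exists to thread the new `recursive` input through as DirectoryLoader's third constructor argument. A minimal standalone sketch (import paths may vary with the langchain version in use):

import { DirectoryLoader } from 'langchain/document_loaders/fs/directory'
import { TextLoader } from 'langchain/document_loaders/fs/text'

async function loadFolder(folderPath: string, recursive: boolean) {
    // recursive = true descends into subdirectories; false stays at the top level
    const loader = new DirectoryLoader(folderPath, { '.txt': (p) => new TextLoader(p) }, recursive)
    return loader.load()
}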


@ -51,11 +51,13 @@ class PlainText_DocumentLoaders implements INode {
{
label: 'Document',
name: 'document',
baseClasses: this.baseClasses
description: 'Array of document objects containing metadata and pageContent',
baseClasses: [...this.baseClasses, 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]


@ -167,7 +167,9 @@ class Playwright_DocumentLoaders implements INode {
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
// if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined
// so when limit is 0 we can fetch all the links
if (limit === null || limit === undefined) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
selectedLinks && selectedLinks.length > 0
@ -184,7 +186,7 @@ class Playwright_DocumentLoaders implements INode {
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
for (const page of selectedLinks.slice(0, limit)) {
docs.push(...(await playwrightLoader(page)))
}
} else {


@ -168,7 +168,9 @@ class Puppeteer_DocumentLoaders implements INode {
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
// if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined
// so when limit is 0 we can fetch all the links
if (limit === null || limit === undefined) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
selectedLinks && selectedLinks.length > 0
@ -185,7 +187,7 @@ class Puppeteer_DocumentLoaders implements INode {
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
for (const page of selectedLinks.slice(0, limit)) {
docs.push(...(await puppeteerLoader(page)))
}
} else {


@ -51,11 +51,13 @@ class Text_DocumentLoaders implements INode {
{
label: 'Document',
name: 'document',
baseClasses: this.baseClasses
description: 'Array of document objects containing metadata and pageContent',
baseClasses: [...this.baseClasses, 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]


@ -51,11 +51,13 @@ class VectorStoreToDocument_DocumentLoaders implements INode {
{
label: 'Document',
name: 'document',
description: 'Array of document objects containing metadata and pageContent',
baseClasses: [...this.baseClasses, 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]


@ -0,0 +1,77 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAIEmbedding } from 'llamaindex'
interface AzureOpenAIConfig {
apiKey?: string
endpoint?: string
apiVersion?: string
deploymentName?: string
}
class AzureOpenAIEmbedding_LlamaIndex_Embeddings implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
credential: INodeParams
tags: string[]
inputs: INodeParams[]
constructor() {
this.label = 'Azure OpenAI Embeddings'
this.name = 'azureOpenAIEmbeddingsLlamaIndex'
this.version = 1.0
this.type = 'AzureOpenAIEmbeddings'
this.icon = 'Azure.svg'
this.category = 'Embeddings'
this.description = 'Azure OpenAI API embeddings, specific to LlamaIndex'
this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)]
this.tags = ['LlamaIndex']
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['azureOpenAIApi']
}
this.inputs = [
{
label: 'Timeout',
name: 'timeout',
type: 'number',
optional: true,
additionalParams: true
}
]
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const timeout = nodeData.inputs?.timeout as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData)
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
const obj: Partial<OpenAIEmbedding> & { azure?: AzureOpenAIConfig } = {
azure: {
apiKey: azureOpenAIApiKey,
endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`,
apiVersion: azureOpenAIApiVersion,
deploymentName: azureOpenAIApiDeploymentName
}
}
if (timeout) obj.timeout = parseInt(timeout, 10)
const model = new OpenAIEmbedding(obj)
return model
}
}
module.exports = { nodeClass: AzureOpenAIEmbedding_LlamaIndex_Embeddings }


@ -0,0 +1,91 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAIEmbedding } from 'llamaindex'
class OpenAIEmbedding_LlamaIndex_Embeddings implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
tags: string[]
credential: INodeParams
inputs: INodeParams[]
constructor() {
this.label = 'OpenAI Embedding'
this.name = 'openAIEmbedding_LlamaIndex'
this.version = 1.0
this.type = 'OpenAIEmbedding'
this.icon = 'openai.svg'
this.category = 'Embeddings'
this.description = 'OpenAI Embedding, specific to LlamaIndex'
this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)]
this.tags = ['LlamaIndex']
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['openAIApi']
}
this.inputs = [
{
label: 'Model Name',
name: 'modelName',
type: 'options',
options: [
{
label: 'text-embedding-3-large',
name: 'text-embedding-3-large'
},
{
label: 'text-embedding-3-small',
name: 'text-embedding-3-small'
},
{
label: 'text-embedding-ada-002',
name: 'text-embedding-ada-002'
}
],
default: 'text-embedding-ada-002',
optional: true
},
{
label: 'Timeout',
name: 'timeout',
type: 'number',
optional: true,
additionalParams: true
},
{
label: 'BasePath',
name: 'basepath',
type: 'string',
optional: true,
additionalParams: true
}
]
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const timeout = nodeData.inputs?.timeout as string
const modelName = nodeData.inputs?.modelName as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const obj: Partial<OpenAIEmbedding> = {
apiKey: openAIApiKey,
model: modelName
}
if (timeout) obj.timeout = parseInt(timeout, 10)
const model = new OpenAIEmbedding(obj)
return model
}
}
module.exports = { nodeClass: OpenAIEmbedding_LlamaIndex_Embeddings }


@ -0,0 +1,149 @@
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { BaseNode, Metadata, BaseRetriever, LLM, ContextChatEngine, ChatMessage } from 'llamaindex'
import { reformatSourceDocuments } from '../EngineUtils'
class ContextChatEngine_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
sessionId?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'Context Chat Engine'
this.name = 'contextChatEngine'
this.version = 1.0
this.type = 'ContextChatEngine'
this.icon = 'context-chat-engine.png'
this.category = 'Engine'
this.description = 'Answer questions based on retrieved documents (context), with built-in memory to remember the conversation'
this.baseClasses = [this.type]
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Chat Model',
name: 'model',
type: 'BaseChatModel_LlamaIndex'
},
{
label: 'Vector Store Retriever',
name: 'vectorStoreRetriever',
type: 'VectorIndexRetriever'
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'Return Source Documents',
name: 'returnSourceDocuments',
type: 'boolean',
optional: true
},
{
label: 'System Message',
name: 'systemMessagePrompt',
type: 'string',
rows: 4,
optional: true,
placeholder:
'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
}
]
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const model = nodeData.inputs?.model as LLM
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
const memory = nodeData.inputs?.memory as FlowiseMemory
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const chatHistory = [] as ChatMessage[]
if (systemMessagePrompt) {
chatHistory.push({
content: systemMessagePrompt,
role: 'user'
})
}
const chatEngine = new ContextChatEngine({ chatModel: model, retriever: vectorStoreRetriever })
const msgs = (await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]
for (const message of msgs) {
if (message.type === 'apiMessage') {
chatHistory.push({
content: message.message,
role: 'assistant'
})
} else if (message.type === 'userMessage') {
chatHistory.push({
content: message.message,
role: 'user'
})
}
}
let text = ''
let isStreamingStarted = false
let sourceDocuments: ICommonObject[] = []
let sourceNodes: BaseNode<Metadata>[] = []
const isStreamingEnabled = options.socketIO && options.socketIOClientId
if (isStreamingEnabled) {
const stream = await chatEngine.chat({ message: input, chatHistory, stream: true })
for await (const chunk of stream) {
text += chunk.response
if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes
if (!isStreamingStarted) {
isStreamingStarted = true
options.socketIO.to(options.socketIOClientId).emit('start', chunk.response)
}
options.socketIO.to(options.socketIOClientId).emit('token', chunk.response)
}
if (returnSourceDocuments) {
sourceDocuments = reformatSourceDocuments(sourceNodes)
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments)
}
} else {
const response = await chatEngine.chat({ message: input, chatHistory })
text = response?.response
sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? [])
}
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: text,
type: 'apiMessage'
}
],
this.sessionId
)
if (returnSourceDocuments) return { text, sourceDocuments }
else return { text }
}
}
module.exports = { nodeClass: ContextChatEngine_LlamaIndex }
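On the receiving end, the 'start', 'token' and 'sourceDocuments' events map naturally onto a socket.io client. A hypothetical client-side sketch (the URL and wiring are assumptions; only the event names come from the engine code above):

import { io } from 'socket.io-client'

const socket = io('http://localhost:3000') // assumed Flowise host
let answer = ''
socket.on('start', () => (answer = '')) // first streamed chunk: reset the buffer
socket.on('token', (token: string) => (answer += token)) // append each streamed token
socket.on('sourceDocuments', (docs: unknown[]) => console.log('sources:', docs))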


@ -0,0 +1,124 @@
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { LLM, ChatMessage, SimpleChatEngine } from 'llamaindex'
class SimpleChatEngine_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
sessionId?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'Simple Chat Engine'
this.name = 'simpleChatEngine'
this.version = 1.0
this.type = 'SimpleChatEngine'
this.icon = 'chat-engine.png'
this.category = 'Engine'
this.description = 'Simple engine to handle back-and-forth conversations'
this.baseClasses = [this.type]
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Chat Model',
name: 'model',
type: 'BaseChatModel_LlamaIndex'
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'System Message',
name: 'systemMessagePrompt',
type: 'string',
rows: 4,
optional: true,
placeholder: 'You are a helpful assistant'
}
]
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const model = nodeData.inputs?.model as LLM
const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
const memory = nodeData.inputs?.memory as FlowiseMemory
const chatHistory = [] as ChatMessage[]
if (systemMessagePrompt) {
chatHistory.push({
content: systemMessagePrompt,
role: 'user'
})
}
const chatEngine = new SimpleChatEngine({ llm: model })
const msgs = (await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]
for (const message of msgs) {
if (message.type === 'apiMessage') {
chatHistory.push({
content: message.message,
role: 'assistant'
})
} else if (message.type === 'userMessage') {
chatHistory.push({
content: message.message,
role: 'user'
})
}
}
let text = ''
let isStreamingStarted = false
const isStreamingEnabled = options.socketIO && options.socketIOClientId
if (isStreamingEnabled) {
const stream = await chatEngine.chat({ message: input, chatHistory, stream: true })
for await (const chunk of stream) {
text += chunk.response
if (!isStreamingStarted) {
isStreamingStarted = true
options.socketIO.to(options.socketIOClientId).emit('start', chunk.response)
}
options.socketIO.to(options.socketIOClientId).emit('token', chunk.response)
}
} else {
const response = await chatEngine.chat({ message: input, chatHistory })
text = response?.response
}
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: text,
type: 'apiMessage'
}
],
this.sessionId
)
return text
}
}
module.exports = { nodeClass: SimpleChatEngine_LlamaIndex }

Binary file not shown (new image, 9.8 KiB)

Binary file not shown (new image, 9.5 KiB)


@ -0,0 +1,12 @@
import { BaseNode, Metadata } from 'llamaindex'
export const reformatSourceDocuments = (sourceNodes: BaseNode<Metadata>[]) => {
const sourceDocuments = []
for (const node of sourceNodes) {
sourceDocuments.push({
pageContent: (node as any).text,
metadata: node.metadata
})
}
return sourceDocuments
}
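Usage is straightforward: retrieval results come back as LlamaIndex nodes, and this helper flattens them into the { pageContent, metadata } shape that Flowise consumers of sourceDocuments expect. A small sketch with the node shape simplified:

const nodes = [{ text: 'First retrieved chunk', metadata: { source: 'guide.md' } }] as any // BaseNode<Metadata>[] in real usage
console.log(reformatSourceDocuments(nodes))
// -> [{ pageContent: 'First retrieved chunk', metadata: { source: 'guide.md' } }]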


@ -0,0 +1,143 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import {
RetrieverQueryEngine,
ResponseSynthesizer,
CompactAndRefine,
TreeSummarize,
Refine,
SimpleResponseBuilder,
BaseNode,
Metadata
} from 'llamaindex'
import { reformatSourceDocuments } from '../EngineUtils'
class QueryEngine_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
sessionId?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'Query Engine'
this.name = 'queryEngine'
this.version = 1.0
this.type = 'QueryEngine'
this.icon = 'query-engine.png'
this.category = 'Engine'
this.description = 'Simple query engine built to answer questions over your data, without memory'
this.baseClasses = [this.type]
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Vector Store Retriever',
name: 'vectorStoreRetriever',
type: 'VectorIndexRetriever'
},
{
label: 'Response Synthesizer',
name: 'responseSynthesizer',
type: 'ResponseSynthesizer',
description:
'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target="_blank" href="https://ts.llamaindex.ai/modules/low_level/response_synthesizer">more</a>',
optional: true
},
{
label: 'Return Source Documents',
name: 'returnSourceDocuments',
type: 'boolean',
optional: true
}
]
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever
const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer
let queryEngine = new RetrieverQueryEngine(vectorStoreRetriever)
if (responseSynthesizerObj) {
if (responseSynthesizerObj.type === 'TreeSummarize') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'CompactAndRefine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new CompactAndRefine(
vectorStoreRetriever.serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'Refine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new Refine(
vectorStoreRetriever.serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
}
}
let text = ''
let sourceDocuments: ICommonObject[] = []
let sourceNodes: BaseNode<Metadata>[] = []
let isStreamingStarted = false
const isStreamingEnabled = options.socketIO && options.socketIOClientId
if (isStreamingEnabled) {
const stream = await queryEngine.query({ query: input, stream: true })
for await (const chunk of stream) {
text += chunk.response
if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes
if (!isStreamingStarted) {
isStreamingStarted = true
options.socketIO.to(options.socketIOClientId).emit('start', chunk.response)
}
options.socketIO.to(options.socketIOClientId).emit('token', chunk.response)
}
if (returnSourceDocuments) {
sourceDocuments = reformatSourceDocuments(sourceNodes)
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments)
}
} else {
const response = await queryEngine.query({ query: input })
text = response?.response
sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? [])
}
if (returnSourceDocuments) return { text, sourceDocuments }
else return { text }
}
}
module.exports = { nodeClass: QueryEngine_LlamaIndex }

Binary file not shown (new image, 12 KiB)


@ -0,0 +1,193 @@
import { flatten } from 'lodash'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import {
TreeSummarize,
SimpleResponseBuilder,
Refine,
BaseEmbedding,
ResponseSynthesizer,
CompactAndRefine,
QueryEngineTool,
LLMQuestionGenerator,
SubQuestionQueryEngine,
BaseNode,
Metadata,
serviceContextFromDefaults
} from 'llamaindex'
import { reformatSourceDocuments } from '../EngineUtils'
class SubQuestionQueryEngine_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
sessionId?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'Sub Question Query Engine'
this.name = 'subQuestionQueryEngine'
this.version = 1.0
this.type = 'SubQuestionQueryEngine'
this.icon = 'subQueryEngine.svg'
this.category = 'Engine'
this.description =
'Breaks a complex query into sub-questions for each relevant data source, then gathers all the intermediate responses and synthesizes a final response'
this.baseClasses = [this.type]
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'QueryEngine Tools',
name: 'queryEngineTools',
type: 'QueryEngineTool',
list: true
},
{
label: 'Chat Model',
name: 'model',
type: 'BaseChatModel_LlamaIndex'
},
{
label: 'Embeddings',
name: 'embeddings',
type: 'BaseEmbedding_LlamaIndex'
},
{
label: 'Response Synthesizer',
name: 'responseSynthesizer',
type: 'ResponseSynthesizer',
description:
'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target="_blank" href="https://ts.llamaindex.ai/modules/low_level/response_synthesizer">more</a>',
optional: true
},
{
label: 'Return Source Documents',
name: 'returnSourceDocuments',
type: 'boolean',
optional: true
}
]
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
const model = nodeData.inputs?.model
const serviceContext = serviceContextFromDefaults({
llm: model,
embedModel: embeddings
})
let queryEngineTools = nodeData.inputs?.queryEngineTools as QueryEngineTool[]
queryEngineTools = flatten(queryEngineTools)
let queryEngine = SubQuestionQueryEngine.fromDefaults({
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer
if (responseSynthesizerObj) {
if (responseSynthesizerObj.type === 'TreeSummarize') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new TreeSummarize(serviceContext, responseSynthesizerObj.textQAPromptTemplate),
serviceContext
})
queryEngine = SubQuestionQueryEngine.fromDefaults({
responseSynthesizer,
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
} else if (responseSynthesizerObj.type === 'CompactAndRefine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new CompactAndRefine(
serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext
})
queryEngine = SubQuestionQueryEngine.fromDefaults({
responseSynthesizer,
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
} else if (responseSynthesizerObj.type === 'Refine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new Refine(
serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext
})
queryEngine = SubQuestionQueryEngine.fromDefaults({
responseSynthesizer,
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
} else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new SimpleResponseBuilder(serviceContext),
serviceContext
})
queryEngine = SubQuestionQueryEngine.fromDefaults({
responseSynthesizer,
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
}
}
let text = ''
let sourceDocuments: ICommonObject[] = []
let sourceNodes: BaseNode<Metadata>[] = []
let isStreamingStarted = false
const isStreamingEnabled = options.socketIO && options.socketIOClientId
if (isStreamingEnabled) {
const stream = await queryEngine.query({ query: input, stream: true })
for await (const chunk of stream) {
text += chunk.response
if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes
if (!isStreamingStarted) {
isStreamingStarted = true
options.socketIO.to(options.socketIOClientId).emit('start', chunk.response)
}
options.socketIO.to(options.socketIOClientId).emit('token', chunk.response)
}
if (returnSourceDocuments) {
sourceDocuments = reformatSourceDocuments(sourceNodes)
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments)
}
} else {
const response = await queryEngine.query({ query: input })
text = response?.response
sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? [])
}
if (returnSourceDocuments) return { text, sourceDocuments }
else return { text }
}
}
module.exports = { nodeClass: SubQuestionQueryEngine_LlamaIndex }


@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-filter-question" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M15 19l-6 2v-8.5l-4.48 -4.928a2 2 0 0 1 -.52 -1.345v-2.227h16v2.172a2 2 0 0 1 -.586 1.414l-4.414 4.414" /><path d="M19 22v.01" /><path d="M19 19a2.003 2.003 0 0 0 .914 -3.782a1.98 1.98 0 0 0 -2.414 .483" /></svg>



@ -117,7 +117,10 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P
memoryKey: memoryKey ?? 'chat_history',
chatHistory: dynamoDb,
sessionId,
dynamodbClient: client
dynamodbClient: client,
tableName,
partitionKey,
dynamoKey: { [partitionKey]: { S: sessionId } }
})
return memory
}
@ -125,6 +128,9 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P
interface BufferMemoryExtendedInput {
dynamodbClient: DynamoDBClient
sessionId: string
tableName: string
partitionKey: string
dynamoKey: Record<string, AttributeValue>
}
interface DynamoDBSerializedChatMessage {
@ -142,6 +148,10 @@ interface DynamoDBSerializedChatMessage {
}
class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
private tableName = ''
private partitionKey = ''
private dynamoKey: Record<string, AttributeValue>
private messageAttributeName: string
sessionId = ''
dynamodbClient: DynamoDBClient
@ -149,11 +159,14 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
super(fields)
this.sessionId = fields.sessionId
this.dynamodbClient = fields.dynamodbClient
this.tableName = fields.tableName
this.partitionKey = fields.partitionKey
this.dynamoKey = fields.dynamoKey
}
overrideDynamoKey(overrideSessionId = '') {
const existingDynamoKey = (this as any).dynamoKey
const partitionKey = (this as any).partitionKey
const existingDynamoKey = this.dynamoKey
const partitionKey = this.partitionKey
let newDynamoKey: Record<string, AttributeValue> = {}
@ -209,9 +222,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.dynamodbClient) return []
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
const tableName = (this as any).tableName
const messageAttributeName = (this as any).messageAttributeName
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey
const tableName = this.tableName
const messageAttributeName = this.messageAttributeName
const params: GetItemCommandInput = {
TableName: tableName,
@ -236,9 +249,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.dynamodbClient) return
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
const tableName = (this as any).tableName
const messageAttributeName = (this as any).messageAttributeName
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey
const tableName = this.tableName
const messageAttributeName = this.messageAttributeName
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
@ -259,8 +272,8 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async clearChatMessages(overrideSessionId = ''): Promise<void> {
if (!this.dynamodbClient) return
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
const tableName = (this as any).tableName
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey
const tableName = this.tableName
const params: DeleteItemCommandInput = {
TableName: tableName,


@ -154,7 +154,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.collection) return []
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const document = await this.collection.findOne({ sessionId: id })
const messages = document?.messages || []
const baseMessages = messages.map(mapStoredMessageToChatMessage)
@ -164,7 +164,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.collection) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
@ -196,7 +196,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async clearChatMessages(overrideSessionId = ''): Promise<void> {
if (!this.collection) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.collection.deleteOne({ sessionId: id })
await this.clear()
}
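The switch from `??` to a ternary is deliberate: these methods default overrideSessionId to the empty string, and '' is falsy but not nullish, so `??` would return '' instead of falling back to the stored session id. In isolation:

const sessionId = 'stored-session'
const override = '' // the default parameter value in the methods above
console.log(override ?? sessionId) // '' (nullish coalescing keeps the empty string)
console.log(override ? override : sessionId) // 'stored-session' (ternary falls back)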


@ -141,7 +141,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods {
}
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
try {
const resp = await this.caller.call(fetch, `${this.url}/sessions/${id}/memory`, {
//@ts-ignore
@ -172,7 +172,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods {
}
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
const inputValues = { [this.inputKey ?? 'input']: input?.text }
@ -182,7 +182,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods {
}
async clearChatMessages(overrideSessionId = ''): Promise<void> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.clear(id)
}
}


@ -189,7 +189,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.redisClient) return []
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const rawStoredMessages = await this.redisClient.lrange(id, this.windowSize ? this.windowSize * -1 : 0, -1)
const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message))
const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage)
@ -199,7 +199,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.redisClient) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
@ -219,7 +219,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async clearChatMessages(overrideSessionId = ''): Promise<void> {
if (!this.redisClient) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.redisClient.del(id)
await this.clear()
}


@ -114,7 +114,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.redisClient) return []
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const rawStoredMessages: StoredMessage[] = await this.redisClient.lrange<StoredMessage>(id, 0, -1)
const orderedMessages = rawStoredMessages.reverse()
const previousMessages = orderedMessages.filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined)
@ -125,7 +125,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.redisClient) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
@ -145,7 +145,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async clearChatMessages(overrideSessionId = ''): Promise<void> {
if (!this.redisClient) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.redisClient.del(id)
await this.clear()
}


@ -163,14 +163,14 @@ class ZepMemoryExtended extends ZepMemory implements MemoryMethods {
}
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const memoryVariables = await this.loadMemoryVariables({}, id)
const baseMessages = memoryVariables[this.memoryKey]
return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
}
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
const inputValues = { [this.inputKey ?? 'input']: input?.text }
@ -180,7 +180,7 @@ class ZepMemoryExtended extends ZepMemory implements MemoryMethods {
}
async clearChatMessages(overrideSessionId = ''): Promise<void> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.clear(id)
}
}


@ -0,0 +1,79 @@
import { getBaseClasses, INode, INodeData, INodeParams } from '../../../src'
import { BaseOutputParser } from 'langchain/schema/output_parser'
import { StructuredOutputParser as LangchainStructuredOutputParser } from 'langchain/output_parsers'
import { CATEGORY } from '../OutputParserHelpers'
import { z } from 'zod'
class AdvancedStructuredOutputParser implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
inputs: INodeParams[]
credential: INodeParams
constructor() {
this.label = 'Advanced Structured Output Parser'
this.name = 'advancedStructuredOutputParser'
this.version = 1.0
this.type = 'AdvancedStructuredOutputParser'
this.description = 'Parse the output of an LLM call into a given structure by providing a Zod schema.'
this.icon = 'structure.svg'
this.category = CATEGORY
this.baseClasses = [this.type, ...getBaseClasses(BaseOutputParser)]
this.inputs = [
{
label: 'Autofix',
name: 'autofixParser',
type: 'boolean',
optional: true,
description: 'In the event that the first call fails, will make another call to the model to fix any errors.'
},
{
label: 'Example JSON',
name: 'exampleJson',
type: 'string',
description: 'Zod schema for the output of the model',
rows: 10,
default: `z.object({
title: z.string(), // Title of the movie as a string
yearOfRelease: z.number().int(), // Release year as an integer number,
genres: z.enum([
"Action", "Comedy", "Drama", "Fantasy", "Horror",
"Mystery", "Romance", "Science Fiction", "Thriller", "Documentary"
]).array().max(2), // Array of genres, max of 2 from the defined enum
shortDescription: z.string().max(500) // Short description, max 500 characters
})`
}
]
}
async init(nodeData: INodeData): Promise<any> {
const schemaString = nodeData.inputs?.exampleJson as string
const autoFix = nodeData.inputs?.autofixParser as boolean
const zodSchemaFunction = new Function('z', `return ${schemaString}`)
const zodSchema = zodSchemaFunction(z)
try {
const structuredOutputParser = LangchainStructuredOutputParser.fromZodSchema(zodSchema)
// NOTE: When we change Flowise to return a json response, the following has to be changed to: JsonStructuredOutputParser
Object.defineProperty(structuredOutputParser, 'autoFix', {
enumerable: true,
configurable: true,
writable: true,
value: autoFix
})
return structuredOutputParser
} catch (exception) {
throw new Error('Error parsing Zod Schema: ' + exception)
}
}
}
module.exports = { nodeClass: AdvancedStructuredOutputParser }
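To see what init() hands back, here is a hedged sketch that uses the same LangChain parser directly; the schema mirrors part of the default above, and the prompt wiring is omitted:

import { z } from 'zod'
import { StructuredOutputParser } from 'langchain/output_parsers'

async function demo(rawModelOutput: string) {
    const schema = z.object({ title: z.string(), yearOfRelease: z.number().int() })
    const parser = StructuredOutputParser.fromZodSchema(schema)
    // getFormatInstructions() is injected into the prompt; parse() validates
    // the raw LLM string against the schema and returns typed data.
    console.log(parser.getFormatInstructions())
    return parser.parse(rawModelOutput)
}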


@ -0,0 +1,8 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M16 3V13M16 3L13 6.13609M16 3L19 6.13609" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M7 15V12C7 10.8954 7.89543 10 9 10H11M25 15V12C25 10.8954 24.1046 10 23 10H21" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M12.5644 20.4399C11.6769 19.7608 9 19.6332 9 21.7961C9 24.1915 13 22.5657 13 25.0902C13 26.9875 10.33 27.5912 9 26.3537" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M24 27V20L28 27V20" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M16 23.5C16 20.7 17.6667 20 18.5 20C19.3333 20 21 20.7 21 23.5C21 26.3 19.3333 27 18.5 27C17.6667 27 16 26.3 16 23.5Z" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6 20V25C6 26.1046 5.10457 27 4 27V27C2.89543 27 2 26.1046 2 25V25" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>



@ -0,0 +1,75 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'
class CompactRefine_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'Compact and Refine'
this.name = 'compactrefineLlamaIndex'
this.version = 1.0
this.type = 'CompactRefine'
this.icon = 'compactrefine.svg'
this.category = 'Response Synthesizer'
this.description =
'CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.'
this.baseClasses = [this.type, 'ResponseSynthesizer']
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Refine Prompt',
name: 'refinePrompt',
type: 'string',
rows: 4,
default: `The original query is as follows: {query}
We have provided an existing answer: {existingAnswer}
We have the opportunity to refine the existing answer (only if needed) with some more context below.
------------
{context}
------------
Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.
Refined Answer:`,
warning: `Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`,
optional: true
},
{
label: 'Text QA Prompt',
name: 'textQAPrompt',
type: 'string',
rows: 4,
default: `Context information is below.
---------------------
{context}
---------------------
Given the context information and not prior knowledge, answer the query.
Query: {query}
Answer:`,
warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
optional: true
}
]
}
async init(nodeData: INodeData): Promise<any> {
const refinePrompt = nodeData.inputs?.refinePrompt as string
const textQAPrompt = nodeData.inputs?.textQAPrompt as string
const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) =>
refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query)
const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query)
return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'CompactAndRefine' })
}
}
module.exports = { nodeClass: CompactRefine_LlamaIndex }
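A small illustrative check (not part of this diff) of the template closures built in init(): each {placeholder} is substituted with String.prototype.replace, which for string patterns replaces only the first occurrence, so each placeholder is expected to appear once in the prompt.
const refinePrompt = 'Query: {query}\nExisting answer: {existingAnswer}\nContext: {context}'
// Mirrors the refinePromptTemplate closure above
const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) =>
    refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query)
console.log(refinePromptTemplate({ query: 'Who wrote Dune?', existingAnswer: 'Frank Herbert', context: 'Dune (1965)...' }))
// => Query: Who wrote Dune?\nExisting answer: Frank Herbert\nContext: Dune (1965)...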

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layers-difference" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M16 16v2a2 2 0 0 1 -2 2h-8a2 2 0 0 1 -2 -2v-8a2 2 0 0 1 2 -2h2v-2a2 2 0 0 1 2 -2h8a2 2 0 0 1 2 2v8a2 2 0 0 1 -2 2h-2" /><path d="M10 8l-2 0l0 2" /><path d="M8 14l0 2l2 0" /><path d="M14 8l2 0l0 2" /><path d="M16 14l0 2l-2 0" /></svg>

View File

@ -0,0 +1,75 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'
class Refine_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'Refine'
this.name = 'refineLlamaIndex'
this.version = 1.0
this.type = 'Refine'
this.icon = 'refine.svg'
this.category = 'Response Synthesizer'
this.description =
'Create and refine an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.'
this.baseClasses = [this.type, 'ResponseSynthesizer']
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Refine Prompt',
name: 'refinePrompt',
type: 'string',
rows: 4,
default: `The original query is as follows: {query}
We have provided an existing answer: {existingAnswer}
We have the opportunity to refine the existing answer (only if needed) with some more context below.
------------
{context}
------------
Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.
Refined Answer:`,
warning: `Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`,
optional: true
},
{
label: 'Text QA Prompt',
name: 'textQAPrompt',
type: 'string',
rows: 4,
default: `Context information is below.
---------------------
{context}
---------------------
Given the context information and not prior knowledge, answer the query.
Query: {query}
Answer:`,
warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
optional: true
}
]
}
async init(nodeData: INodeData): Promise<any> {
const refinePrompt = nodeData.inputs?.refinePrompt as string
const textQAPrompt = nodeData.inputs?.textQAPrompt as string
const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) =>
refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query)
const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query)
return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'Refine' })
}
}
module.exports = { nodeClass: Refine_LlamaIndex }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-filter-search" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M11.36 20.213l-2.36 .787v-8.5l-4.48 -4.928a2 2 0 0 1 -.52 -1.345v-2.227h16v2.172a2 2 0 0 1 -.586 1.414l-4.414 4.414" /><path d="M18 18m-3 0a3 3 0 1 0 6 0a3 3 0 1 0 -6 0" /><path d="M20.2 20.2l1.8 1.8" /></svg>

View File

@ -0,0 +1,35 @@
import { INode, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'
class SimpleResponseBuilder_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'Simple Response Builder'
this.name = 'simpleResponseBuilderLlamaIndex'
this.version = 1.0
this.type = 'SimpleResponseBuilder'
this.icon = 'simplerb.svg'
this.category = 'Response Synthesizer'
this.description = `Apply a query to a collection of text chunks, gathering the responses in an array, and return a combined string of all responses. Useful for individual queries on each text chunk.`
this.baseClasses = [this.type, 'ResponseSynthesizer']
this.tags = ['LlamaIndex']
this.inputs = []
}
async init(): Promise<any> {
return new ResponseSynthesizerClass({ type: 'SimpleResponseBuilder' })
}
}
module.exports = { nodeClass: SimpleResponseBuilder_LlamaIndex }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-quote" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M10 11h-4a1 1 0 0 1 -1 -1v-3a1 1 0 0 1 1 -1h3a1 1 0 0 1 1 1v6c0 2.667 -1.333 4.333 -4 5" /><path d="M19 11h-4a1 1 0 0 1 -1 -1v-3a1 1 0 0 1 1 -1h3a1 1 0 0 1 1 1v6c0 2.667 -1.333 4.333 -4 5" /></svg>

View File

@ -0,0 +1,56 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'
class TreeSummarize_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'TreeSummarize'
this.name = 'treeSummarizeLlamaIndex'
this.version = 1.0
this.type = 'TreeSummarize'
this.icon = 'treesummarize.svg'
this.category = 'Response Synthesizer'
this.description =
'Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.'
this.baseClasses = [this.type, 'ResponseSynthesizer']
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Prompt',
name: 'prompt',
type: 'string',
rows: 4,
default: `Context information from multiple sources is below.
---------------------
{context}
---------------------
Given the information from multiple sources and not prior knowledge, answer the query.
Query: {query}
Answer:`,
warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
optional: true
}
]
}
async init(nodeData: INodeData): Promise<any> {
const prompt = nodeData.inputs?.prompt as string
const textQAPromptTemplate = ({ context = '', query = '' }) => prompt.replace('{context}', context).replace('{query}', query)
return new ResponseSynthesizerClass({ textQAPromptTemplate, type: 'TreeSummarize' })
}
}
module.exports = { nodeClass: TreeSummarize_LlamaIndex }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-tree" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M12 13l-2 -2" /><path d="M12 12l2 -2" /><path d="M12 21v-13" /><path d="M9.824 16a3 3 0 0 1 -2.743 -3.69a3 3 0 0 1 .304 -4.833a3 3 0 0 1 4.615 -3.707a3 3 0 0 1 4.614 3.707a3 3 0 0 1 .305 4.833a3 3 0 0 1 -2.919 3.695h-4z" /></svg>

View File

@ -0,0 +1,11 @@
export class ResponseSynthesizerClass {
type: string
textQAPromptTemplate?: any
refinePromptTemplate?: any
constructor(params: { type: string; textQAPromptTemplate?: any; refinePromptTemplate?: any }) {
this.type = params.type
this.textQAPromptTemplate = params.textQAPromptTemplate
this.refinePromptTemplate = params.refinePromptTemplate
}
}

View File

@ -94,11 +94,13 @@ class CohereRerankRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -78,11 +78,13 @@ class EmbeddingsFilterRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -140,11 +140,13 @@ Passage:`
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -58,11 +58,13 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -89,11 +89,13 @@ class RRFRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -74,11 +74,13 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -0,0 +1,68 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { VectorStoreIndex } from 'llamaindex'
class QueryEngine_Tools implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
tags: string[]
baseClasses: string[]
inputs?: INodeParams[]
constructor() {
this.label = 'QueryEngine Tool'
this.name = 'queryEngineToolLlamaIndex'
this.version = 1.0
this.type = 'QueryEngineTool'
this.icon = 'queryEngineTool.svg'
this.category = 'Tools'
this.tags = ['LlamaIndex']
this.description = 'Tool used to invoke query engine'
this.baseClasses = [this.type]
this.inputs = [
{
label: 'Vector Store Index',
name: 'vectorStoreIndex',
type: 'VectorStoreIndex'
},
{
label: 'Tool Name',
name: 'toolName',
type: 'string',
description: 'Tool name must be lowercase letters and underscores. Ex: my_tool'
},
{
label: 'Tool Description',
name: 'toolDesc',
type: 'string',
rows: 4
}
]
}
async init(nodeData: INodeData): Promise<any> {
const vectorStoreIndex = nodeData.inputs?.vectorStoreIndex as VectorStoreIndex
const toolName = nodeData.inputs?.toolName as string
const toolDesc = nodeData.inputs?.toolDesc as string
const queryEngineTool = {
queryEngine: vectorStoreIndex.asQueryEngine({
preFilters: {
...(vectorStoreIndex as any).metadatafilter
}
}),
metadata: {
name: toolName,
description: toolDesc
},
vectorStoreIndex
}
return queryEngineTool
}
}
module.exports = { nodeClass: QueryEngine_Tools }
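For illustration only, a hypothetical consumer of the object init() returns; the nodeData shape, the vectorStoreIndex built elsewhere, and the query call are assumptions (llamaindex 0.0.x exposed query(queryString) on query engines), not part of this diff.
// Hypothetical wiring; Flowise normally builds nodeData internally
const nodeData: any = { inputs: { vectorStoreIndex, toolName: 'docs_qa', toolDesc: 'Answers questions about the docs' } }
const tool = await new QueryEngine_Tools().init(nodeData)
console.log(tool.metadata) // { name: 'docs_qa', description: 'Answers questions about the docs' }
// Assumed llamaindex 0.0.x signature: query(queryString) resolves to a Response
const response = await tool.queryEngine.query('What does the document say about pricing?')
console.log(String(response))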

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-brand-google-big-query" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M17.73 19.875a2.225 2.225 0 0 1 -1.948 1.125h-7.283a2.222 2.222 0 0 1 -1.947 -1.158l-4.272 -6.75a2.269 2.269 0 0 1 0 -2.184l4.272 -6.75a2.225 2.225 0 0 1 1.946 -1.158h7.285c.809 0 1.554 .443 1.947 1.158l3.98 6.75a2.33 2.33 0 0 1 0 2.25l-3.98 6.75v-.033z" /><path d="M11.5 11.5m-3.5 0a3.5 3.5 0 1 0 7 0a3.5 3.5 0 1 0 -7 0" /><path d="M14 14l2 2" /></svg>

View File

@ -0,0 +1,383 @@
import {
BaseNode,
Document,
Metadata,
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
serviceContextFromDefaults,
storageContextFromDefaults,
VectorStoreIndex,
BaseEmbedding
} from 'llamaindex'
import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone'
import { flatten } from 'lodash'
import { Document as LCDocument } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils'
class PineconeLlamaIndex_VectorStores implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
tags: string[]
baseClasses: string[]
inputs: INodeParams[]
credential: INodeParams
outputs: INodeOutputsValue[]
constructor() {
this.label = 'Pinecone'
this.name = 'pineconeLlamaIndex'
this.version = 1.0
this.type = 'Pinecone'
this.icon = 'pinecone.svg'
this.category = 'Vector Stores'
this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database`
this.baseClasses = [this.type, 'VectorIndexRetriever']
this.tags = ['LlamaIndex']
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['pineconeApi']
}
this.inputs = [
{
label: 'Document',
name: 'document',
type: 'Document',
list: true,
optional: true
},
{
label: 'Chat Model',
name: 'model',
type: 'BaseChatModel_LlamaIndex'
},
{
label: 'Embeddings',
name: 'embeddings',
type: 'BaseEmbedding_LlamaIndex'
},
{
label: 'Pinecone Index',
name: 'pineconeIndex',
type: 'string'
},
{
label: 'Pinecone Namespace',
name: 'pineconeNamespace',
type: 'string',
placeholder: 'my-first-namespace',
additionalParams: true,
optional: true
},
{
label: 'Pinecone Metadata Filter',
name: 'pineconeMetadataFilter',
type: 'json',
optional: true,
additionalParams: true
},
{
label: 'Top K',
name: 'topK',
description: 'Number of top results to fetch. Defaults to 4',
placeholder: '4',
type: 'number',
additionalParams: true,
optional: true
}
]
this.outputs = [
{
label: 'Pinecone Retriever',
name: 'retriever',
baseClasses: this.baseClasses
},
{
label: 'Pinecone Vector Store Index',
name: 'vectorStore',
baseClasses: [this.type, 'VectorStoreIndex']
}
]
}
//@ts-ignore
vectorStoreMethods = {
async upsert(nodeData: INodeData, options: ICommonObject): Promise<void> {
const indexName = nodeData.inputs?.pineconeIndex as string
const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string
const docs = nodeData.inputs?.document as LCDocument[]
const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
const model = nodeData.inputs?.model
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
const pcvs = new PineconeVectorStore({
indexName,
apiKey: pineconeApiKey,
namespace: pineconeNamespace
})
const flattenDocs = docs && docs.length ? flatten(docs) : []
const finalDocs = []
for (let i = 0; i < flattenDocs.length; i += 1) {
if (flattenDocs[i] && flattenDocs[i].pageContent) {
finalDocs.push(new LCDocument(flattenDocs[i]))
}
}
const llamadocs: Document[] = []
for (const doc of finalDocs) {
llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata }))
}
const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
const storageContext = await storageContextFromDefaults({ vectorStore: pcvs })
try {
await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext })
} catch (e) {
throw new Error(e)
}
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const indexName = nodeData.inputs?.pineconeIndex as string
const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string
const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter
const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
const model = nodeData.inputs?.model
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
const obj: PineconeParams = {
indexName,
apiKey: pineconeApiKey
}
if (pineconeNamespace) obj.namespace = pineconeNamespace
let metadatafilter = {}
if (pineconeMetadataFilter) {
metadatafilter = typeof pineconeMetadataFilter === 'object' ? pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter)
obj.queryFilter = metadatafilter
}
const pcvs = new PineconeVectorStore(obj)
const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
const storageContext = await storageContextFromDefaults({ vectorStore: pcvs })
const index = await VectorStoreIndex.init({
nodes: [],
storageContext,
serviceContext
})
const output = nodeData.outputs?.output as string
if (output === 'retriever') {
const retriever = index.asRetriever()
retriever.similarityTopK = k
;(retriever as any).serviceContext = serviceContext
return retriever
} else if (output === 'vectorStore') {
;(index as any).k = k
if (metadatafilter) {
;(index as any).metadatafilter = metadatafilter
}
return index
}
return index
}
}
type PineconeParams = {
indexName: string
apiKey: string
namespace?: string
chunkSize?: number
queryFilter?: object
}
class PineconeVectorStore implements VectorStore {
storesText: boolean = true
db?: Pinecone
indexName: string
apiKey: string
chunkSize: number
namespace?: string
queryFilter?: object
constructor(params: PineconeParams) {
this.indexName = params?.indexName
this.apiKey = params?.apiKey
this.namespace = params?.namespace ?? ''
this.chunkSize = params?.chunkSize ?? Number.parseInt(process.env.PINECONE_CHUNK_SIZE ?? '100')
this.queryFilter = params?.queryFilter ?? {}
}
private async getDb(): Promise<Pinecone> {
if (!this.db) {
this.db = new Pinecone({
apiKey: this.apiKey
})
}
return Promise.resolve(this.db)
}
client() {
return this.getDb()
}
async index() {
const db: Pinecone = await this.getDb()
return db.Index(this.indexName)
}
async clearIndex() {
const db: Pinecone = await this.getDb()
return await db.index(this.indexName).deleteAll()
}
async add(embeddingResults: BaseNode<Metadata>[]): Promise<string[]> {
if (embeddingResults.length == 0) {
return Promise.resolve([])
}
const idx: Index = await this.index()
const nodes = embeddingResults.map(this.nodeToRecord)
for (let i = 0; i < nodes.length; i += this.chunkSize) {
const chunk = nodes.slice(i, i + this.chunkSize)
const result = await this.saveChunk(idx, chunk)
if (!result) {
return Promise.reject()
}
}
return Promise.resolve([])
}
protected async saveChunk(idx: Index, chunk: any) {
try {
const namespace = idx.namespace(this.namespace ?? '')
await namespace.upsert(chunk)
return true
} catch (err) {
return false
}
}
async delete(refDocId: string): Promise<void> {
const idx = await this.index()
const namespace = idx.namespace(this.namespace ?? '')
return namespace.deleteOne(refDocId)
}
async query(query: VectorStoreQuery): Promise<VectorStoreQueryResult> {
const queryOptions: any = {
vector: query.queryEmbedding,
topK: query.similarityTopK,
filter: this.queryFilter
}
const idx = await this.index()
const namespace = idx.namespace(this.namespace ?? '')
const results = await namespace.query(queryOptions)
const idList = results.matches.map((row) => row.id)
const records: FetchResponse<any> = await namespace.fetch(idList)
const rows = Object.values(records.records)
const nodes = rows.map((row) => {
return new Document({
id_: row.id,
text: this.textFromResultRow(row),
metadata: this.metaWithoutText(row.metadata),
embedding: row.values
})
})
const result = {
nodes: nodes,
similarities: results.matches.map((row) => row.score || 999),
ids: results.matches.map((row) => row.id)
}
return Promise.resolve(result)
}
/**
* Required by VectorStore interface. Currently ignored.
*/
persist(): Promise<void> {
return Promise.resolve()
}
textFromResultRow(row: ScoredPineconeRecord<Metadata>): string {
return row.metadata?.text ?? ''
}
metaWithoutText(meta: Metadata): any {
return Object.keys(meta)
.filter((key) => key != 'text')
.reduce((acc: any, key: string) => {
acc[key] = meta[key]
return acc
}, {})
}
nodeToRecord(node: BaseNode<Metadata>) {
let id: any = node.id_.length ? node.id_ : null
return {
id: id,
values: node.getEmbedding(),
metadata: {
...cleanupMetadata(node.metadata),
text: (node as any).text
}
}
}
}
const cleanupMetadata = (nodeMetadata: ICommonObject) => {
// Pinecone doesn't support nested objects, so we flatten them
const documentMetadata: any = { ...nodeMetadata }
// preserve string arrays which are allowed
const stringArrays: Record<string, string[]> = {}
for (const key of Object.keys(documentMetadata)) {
if (Array.isArray(documentMetadata[key]) && documentMetadata[key].every((el: any) => typeof el === 'string')) {
stringArrays[key] = documentMetadata[key]
delete documentMetadata[key]
}
}
const metadata: {
[key: string]: string | number | boolean | string[] | null
} = {
...flattenObject(documentMetadata),
...stringArrays
}
// Pinecone doesn't support null values, so we remove them
for (const key of Object.keys(metadata)) {
if (metadata[key] == null) {
delete metadata[key]
} else if (typeof metadata[key] === 'object' && Object.keys(metadata[key] as unknown as object).length === 0) {
delete metadata[key]
}
}
return metadata
}
module.exports = { nodeClass: PineconeLlamaIndex_VectorStores }
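A worked example (illustrative, not part of this diff) of what cleanupMetadata produces, following the flattenObject, string-array, and null-removal logic above:
const input = {
    source: 'docs/guide.txt',
    loc: { lines: { from: 1, to: 42 } }, // nested objects are flattened into dot keys
    tags: ['llamaindex', 'pinecone'], // string arrays are preserved as-is
    reviewer: null // null values are removed before upserting
}
// cleanupMetadata(input) yields:
// { source: 'docs/guide.txt', 'loc.lines.from': 1, 'loc.lines.to': 42, tags: ['llamaindex', 'pinecone'] }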

View File

@ -24,7 +24,7 @@ class Postgres_VectorStores implements INode {
constructor() {
this.label = 'Postgres'
this.name = 'postgres'
this.version = 2.0
this.version = 3.0
this.type = 'Postgres'
this.icon = 'postgres.svg'
this.category = 'Vector Stores'
@ -60,13 +60,6 @@ class Postgres_VectorStores implements INode {
name: 'database',
type: 'string'
},
{
label: 'SSL Connection',
name: 'sslConnection',
type: 'boolean',
default: false,
optional: false
},
{
label: 'Port',
name: 'port',
@ -124,7 +117,6 @@ class Postgres_VectorStores implements INode {
const docs = nodeData.inputs?.document as Document[]
const embeddings = nodeData.inputs?.embeddings as Embeddings
const additionalConfig = nodeData.inputs?.additionalConfig as string
const sslConnection = nodeData.inputs?.sslConnection as boolean
let additionalConfiguration = {}
if (additionalConfig) {
@ -142,8 +134,7 @@ class Postgres_VectorStores implements INode {
port: nodeData.inputs?.port as number,
username: user,
password: password,
database: nodeData.inputs?.database as string,
ssl: sslConnection
database: nodeData.inputs?.database as string
}
const args = {
@ -198,7 +189,8 @@ class Postgres_VectorStores implements INode {
type: 'postgres',
host: nodeData.inputs?.host as string,
port: nodeData.inputs?.port as number,
username: user,
username: user, // Required by TypeORMVectorStore
user: user, // Required by Pool in similaritySearchVectorWithScore
password: password,
database: nodeData.inputs?.database as string
}
@ -248,14 +240,7 @@ const similaritySearchVectorWithScore = async (
ORDER BY "_distance" ASC
LIMIT $3;`
const poolOptions = {
host: postgresConnectionOptions.host,
port: postgresConnectionOptions.port,
user: postgresConnectionOptions.username,
password: postgresConnectionOptions.password,
database: postgresConnectionOptions.database
}
const pool = new Pool(poolOptions)
const pool = new Pool(postgresConnectionOptions)
const conn = await pool.connect()
const documents = await conn.query(queryString, [embeddingString, _filter, k])
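A note on why the connection options now carry both username and user (consistent with the inline comments above): TypeORMVectorStore reads username while node-postgres reads user, so the same object can be handed to both, and pg ignores keys it does not recognize. A minimal sketch with hypothetical values:
import { Pool } from 'pg'
const postgresConnectionOptions = {
    type: 'postgres', // used by TypeORM, ignored by pg
    host: 'localhost',
    port: 5432,
    username: 'flowise', // read by TypeORMVectorStore
    user: 'flowise', // read by pg's Pool
    password: 'secret',
    database: 'vectors'
}
const pool = new Pool(postgresConnectionOptions as any) // pg picks the keys it knows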

View File

@ -0,0 +1,145 @@
import path from 'path'
import { flatten } from 'lodash'
import { storageContextFromDefaults, serviceContextFromDefaults, VectorStoreIndex, Document } from 'llamaindex'
import { Document as LCDocument } from 'langchain/document'
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getUserHome } from '../../../src'
class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'SimpleStore'
this.name = 'simpleStoreLlamaIndex'
this.version = 1.0
this.type = 'SimpleVectorStore'
this.icon = 'simplevs.svg'
this.category = 'Vector Stores'
this.description = 'Upsert embedded data to a local path and perform similarity search'
this.baseClasses = [this.type, 'VectorIndexRetriever']
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Document',
name: 'document',
type: 'Document',
list: true,
optional: true
},
{
label: 'Chat Model',
name: 'model',
type: 'BaseChatModel_LlamaIndex'
},
{
label: 'Embeddings',
name: 'embeddings',
type: 'BaseEmbedding_LlamaIndex'
},
{
label: 'Base Path to store',
name: 'basePath',
description:
'Path to persist embedding indexes. If not specified, defaults to the same path where the database is stored',
type: 'string',
optional: true
},
{
label: 'Top K',
name: 'topK',
description: 'Number of top results to fetch. Defaults to 4',
placeholder: '4',
type: 'number',
optional: true
}
]
this.outputs = [
{
label: 'SimpleStore Retriever',
name: 'retriever',
baseClasses: this.baseClasses
},
{
label: 'SimpleStore Vector Store Index',
name: 'vectorStore',
baseClasses: [this.type, 'VectorStoreIndex']
}
]
}
//@ts-ignore
vectorStoreMethods = {
async upsert(nodeData: INodeData): Promise<void> {
const basePath = nodeData.inputs?.basePath as string
const docs = nodeData.inputs?.document as LCDocument[]
const embeddings = nodeData.inputs?.embeddings
const model = nodeData.inputs?.model
let filePath = ''
if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex')
else filePath = basePath
const flattenDocs = docs && docs.length ? flatten(docs) : []
const finalDocs = []
for (let i = 0; i < flattenDocs.length; i += 1) {
finalDocs.push(new LCDocument(flattenDocs[i]))
}
const llamadocs: Document[] = []
for (const doc of finalDocs) {
llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata }))
}
const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
const storageContext = await storageContextFromDefaults({ persistDir: filePath })
try {
await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext })
} catch (e) {
throw new Error(e)
}
}
}
async init(nodeData: INodeData): Promise<any> {
const basePath = nodeData.inputs?.basePath as string
const embeddings = nodeData.inputs?.embeddings
const model = nodeData.inputs?.model
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
let filePath = ''
if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex')
else filePath = basePath
const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
const storageContext = await storageContextFromDefaults({ persistDir: filePath })
const index = await VectorStoreIndex.init({ storageContext, serviceContext })
const output = nodeData.outputs?.output as string
if (output === 'retriever') {
const retriever = index.asRetriever()
retriever.similarityTopK = k
;(retriever as any).serviceContext = serviceContext
return retriever
} else if (output === 'vectorStore') {
;(index as any).k = k
return index
}
return index
}
}
module.exports = { nodeClass: SimpleStoreUpsert_LlamaIndex_VectorStores }

View File

@ -0,0 +1,6 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-database" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
<path d="M12 6m-8 0a8 3 0 1 0 16 0a8 3 0 1 0 -16 0"></path>
<path d="M4 6v6a8 3 0 0 0 16 0v-6"></path>
<path d="M4 12v6a8 3 0 0 0 16 0v-6"></path>
</svg>

View File

@ -1,6 +1,6 @@
{
"name": "flowise-components",
"version": "1.5.3",
"version": "1.6.0",
"description": "Flowiseai Components",
"main": "dist/src/index",
"types": "dist/src/index.d.ts",
@ -62,6 +62,7 @@
"langfuse-langchain": "2.3.3",
"langsmith": "0.0.53",
"linkifyjs": "^4.1.1",
"llamaindex": "^0.0.48",
"llmonitor": "^0.5.5",
"mammoth": "^1.5.1",
"moment": "^2.29.3",

View File

@ -98,6 +98,7 @@ export interface INodeProperties {
version: number
category: string // TODO: use enum instead of string
baseClasses: string[]
tags?: string[]
description?: string
filePath?: string
badge?: string

View File

@ -662,6 +662,28 @@ export const convertSchemaToZod = (schema: string | object): ICommonObject => {
}
}
/**
* Flatten nested object
* @param {ICommonObject} obj
* @param {string} parentKey
* @returns {ICommonObject}
*/
export const flattenObject = (obj: ICommonObject, parentKey?: string) => {
let result: any = {}
Object.keys(obj).forEach((key) => {
const value = obj[key]
const _key = parentKey ? parentKey + '.' + key : key
// typeof null === 'object', so guard against null before recursing
if (value && typeof value === 'object') {
result = { ...result, ...flattenObject(value, _key) }
} else {
result[_key] = value
}
})
return result
}
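// Illustrative usage of flattenObject (not part of this diff):
//   flattenObject({ a: 1, b: { c: 2, d: { e: 3 } } })
//   => { a: 1, 'b.c': 2, 'b.d.e': 3 }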
/**
* Convert BaseMessage to IMessage
* @param {BaseMessage[]} messages

View File

@ -21,6 +21,7 @@ PORT=3000
# FLOWISE_USERNAME=user
# FLOWISE_PASSWORD=1234
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
# FLOWISE_FILE_SIZE_LIMIT=50mb
# DEBUG=true
# LOG_LEVEL=debug (error | warn | info | verbose | debug)
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs

View File

@ -1,5 +1,7 @@
{
"description": "Use OpenAI Function Agent and Chain to automatically decide which API to call, generating url and body request from conversation",
"categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,OpenAI Function Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Given API docs, agent automatically decide which API to call, generating url and body request from conversation",
"categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,Conversational Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -0,0 +1,464 @@
{
"description": "Return response as a JSON structure as specified by a Zod schema",
"badge": "NEW",
"nodes": [
{
"width": 300,
"height": 508,
"id": "llmChain_0",
"position": {
"x": 1229.1699649849293,
"y": 245.55173505632646
},
"type": "customNode",
"data": {
"id": "llmChain_0",
"label": "LLM Chain",
"version": 3,
"name": "llmChain",
"type": "LLMChain",
"baseClasses": ["LLMChain", "BaseChain", "Runnable"],
"category": "Chains",
"description": "Chain to run queries against LLMs",
"inputParams": [
{
"label": "Chain Name",
"name": "chainName",
"type": "string",
"placeholder": "Name Your Chain",
"optional": true,
"id": "llmChain_0-input-chainName-string"
}
],
"inputAnchors": [
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "llmChain_0-input-model-BaseLanguageModel"
},
{
"label": "Prompt",
"name": "prompt",
"type": "BasePromptTemplate",
"id": "llmChain_0-input-prompt-BasePromptTemplate"
},
{
"label": "Output Parser",
"name": "outputParser",
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOpenAI_0.data.instance}}",
"prompt": "{{chatPromptTemplate_0.data.instance}}",
"outputParser": "{{advancedStructuredOutputParser_0.data.instance}}",
"chainName": "",
"inputModeration": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable",
"name": "llmChain",
"label": "LLM Chain",
"type": "LLMChain | BaseChain | Runnable"
},
{
"id": "llmChain_0-output-outputPrediction-string|json",
"name": "outputPrediction",
"label": "Output Prediction",
"type": "string | json"
}
],
"default": "llmChain"
}
],
"outputs": {
"output": "llmChain"
},
"selected": false
},
"positionAbsolute": {
"x": 1229.1699649849293,
"y": 245.55173505632646
},
"selected": false
},
{
"width": 300,
"height": 690,
"id": "chatPromptTemplate_0",
"position": {
"x": 493.26582927222483,
"y": -156.20470841335592
},
"type": "customNode",
"data": {
"id": "chatPromptTemplate_0",
"label": "Chat Prompt Template",
"version": 1,
"name": "chatPromptTemplate",
"type": "ChatPromptTemplate",
"baseClasses": ["ChatPromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "Runnable"],
"category": "Prompts",
"description": "Schema to represent a chat prompt",
"inputParams": [
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"placeholder": "You are a helpful assistant that translates {input_language} to {output_language}.",
"id": "chatPromptTemplate_0-input-systemMessagePrompt-string"
},
{
"label": "Human Message",
"name": "humanMessagePrompt",
"type": "string",
"rows": 4,
"placeholder": "{text}",
"id": "chatPromptTemplate_0-input-humanMessagePrompt-string"
},
{
"label": "Format Prompt Values",
"name": "promptValues",
"type": "json",
"optional": true,
"acceptVariable": true,
"list": true,
"id": "chatPromptTemplate_0-input-promptValues-json"
}
],
"inputAnchors": [],
"inputs": {
"systemMessagePrompt": "This AI is designed to only output information in JSON format without exception. This AI can only output JSON and will never output any other text.\n\nWhen asked to correct itself, this AI will only output the corrected JSON and never any other text.",
"humanMessagePrompt": "{text}",
"promptValues": ""
},
"outputAnchors": [
{
"id": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable",
"name": "chatPromptTemplate",
"label": "ChatPromptTemplate",
"type": "ChatPromptTemplate | BaseChatPromptTemplate | BasePromptTemplate | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 493.26582927222483,
"y": -156.20470841335592
},
"dragging": false
},
{
"width": 300,
"height": 576,
"id": "chatOpenAI_0",
"position": {
"x": 860.555928011636,
"y": -355.71028569475095
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-4-32k-0613",
"name": "gpt-4-32k-0613"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
},
{
"label": "gpt-3.5-turbo-16k-0613",
"name": "gpt-3.5-turbo-16k-0613"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-basepath-string"
},
{
"label": "BaseOptions",
"name": "baseOptions",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"modelName": "",
"temperature": "0",
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 860.555928011636,
"y": -355.71028569475095
},
"dragging": false
},
{
"width": 300,
"height": 454,
"id": "advancedStructuredOutputParser_0",
"position": {
"x": 489.3637511211284,
"y": 580.0628053662244
},
"type": "customNode",
"data": {
"id": "advancedStructuredOutputParser_0",
"label": "Advanced Structured Output Parser",
"version": 1,
"name": "advancedStructuredOutputParser",
"type": "AdvancedStructuredOutputParser",
"baseClasses": ["AdvancedStructuredOutputParser", "BaseLLMOutputParser", "Runnable"],
"category": "Output Parsers",
"description": "Parse the output of an LLM call into a given structure by providing a Zod schema.",
"inputParams": [
{
"label": "Autofix",
"name": "autofixParser",
"type": "boolean",
"optional": true,
"description": "In the event that the first call fails, will make another call to the model to fix any errors.",
"id": "advancedStructuredOutputParser_0-input-autofixParser-boolean"
},
{
"label": "Example JSON",
"name": "exampleJson",
"type": "string",
"description": "Zod schema for the output of the model",
"rows": 10,
"default": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})",
"id": "advancedStructuredOutputParser_0-input-exampleJson-string"
}
],
"inputAnchors": [],
"inputs": {
"autofixParser": "",
"exampleJson": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})"
},
"outputAnchors": [
{
"id": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable",
"name": "advancedStructuredOutputParser",
"label": "AdvancedStructuredOutputParser",
"type": "AdvancedStructuredOutputParser | BaseLLMOutputParser | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 489.3637511211284,
"y": 580.0628053662244
}
}
],
"edges": [
{
"source": "chatPromptTemplate_0",
"sourceHandle": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-prompt-BasePromptTemplate",
"type": "buttonedge",
"id": "chatPromptTemplate_0-chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel"
},
{
"source": "advancedStructuredOutputParser_0",
"sourceHandle": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser",
"type": "buttonedge",
"id": "advancedStructuredOutputParser_0-advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser"
}
]
}

View File

@ -1,5 +1,7 @@
{
"description": "Output antonym of given user input using few-shot prompt template built with examples",
"categories": "Few Shot Prompt,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Use AutoGPT - Autonomous agent with chain of thoughts for self-guided task completion",
"categories": "AutoGPT,SERP Tool,File Read/Write,ChatOpenAI,Pinecone,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Use BabyAGI to create tasks and reprioritize for a given objective",
"categories": "BabyAGI,ChatOpenAI,Pinecone,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Analyse and summarize CSV data",
"categories": "CSV Agent,ChatOpenAI,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Engage with data sources such as YouTube Transcripts, Google, and more through intelligent Q&A interactions",
"categories": "Memory Vector Store,SearchAPI,ChatOpenAI,Conversational Retrieval QA Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Use ChatGPT Plugins within LangChain abstractions with GET and POST Tools",
"categories": "ChatGPT Plugin,HTTP GET/POST,ChatOpenAI,MRKL Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Use Anthropic Claude with 200k context window to ingest whole document for QnA",
"categories": "Buffer Memory,Prompt Template,Conversation Chain,ChatAnthropic,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -439,10 +441,10 @@
"type": "options",
"options": [
{
"id": "plainText_0-output-document-Document",
"id": "plainText_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "plainText_0-output-text-string|json",

View File

@ -0,0 +1,919 @@
{
"description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex",
"categories": "Text File,Prompt Template,ChatOpenAI,Conversation Chain,Pinecone,LlamaIndex,Redis",
"framework": "LlamaIndex",
"badge": "NEW",
"nodes": [
{
"width": 300,
"height": 438,
"id": "textFile_0",
"position": {
"x": 221.215421786192,
"y": 94.91489477412404
},
"type": "customNode",
"data": {
"id": "textFile_0",
"label": "Text File",
"version": 3,
"name": "textFile",
"type": "Document",
"baseClasses": ["Document"],
"category": "Document Loaders",
"description": "Load data from text files",
"inputParams": [
{
"label": "Txt File",
"name": "txtFile",
"type": "file",
"fileType": ".txt, .html, .aspx, .asp, .cpp, .c, .cs, .css, .go, .h, .java, .js, .less, .ts, .php, .proto, .python, .py, .rst, .ruby, .rb, .rs, .scala, .sc, .scss, .sol, .sql, .swift, .markdown, .md, .tex, .ltx, .vb, .xml",
"id": "textFile_0-input-txtFile-file"
},
{
"label": "Metadata",
"name": "metadata",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "textFile_0-input-metadata-json"
}
],
"inputAnchors": [
{
"label": "Text Splitter",
"name": "textSplitter",
"type": "TextSplitter",
"optional": true,
"id": "textFile_0-input-textSplitter-TextSplitter"
}
],
"inputs": {
"textSplitter": "{{recursiveCharacterTextSplitter_0.data.instance}}",
"metadata": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document | json"
},
{
"id": "textFile_0-output-text-string|json",
"name": "text",
"label": "Text",
"type": "string | json"
}
],
"default": "document"
}
],
"outputs": {
"output": "document"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 221.215421786192,
"y": 94.91489477412404
},
"dragging": false
},
{
"width": 300,
"height": 429,
"id": "recursiveCharacterTextSplitter_0",
"position": {
"x": -203.4868320229876,
"y": 101.32475976329766
},
"type": "customNode",
"data": {
"id": "recursiveCharacterTextSplitter_0",
"label": "Recursive Character Text Splitter",
"version": 2,
"name": "recursiveCharacterTextSplitter",
"type": "RecursiveCharacterTextSplitter",
"baseClasses": ["RecursiveCharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer", "Runnable"],
"category": "Text Splitters",
"description": "Split documents recursively by different characters - starting with \"\\n\\n\", then \"\\n\", then \" \"",
"inputParams": [
{
"label": "Chunk Size",
"name": "chunkSize",
"type": "number",
"default": 1000,
"optional": true,
"id": "recursiveCharacterTextSplitter_0-input-chunkSize-number"
},
{
"label": "Chunk Overlap",
"name": "chunkOverlap",
"type": "number",
"optional": true,
"id": "recursiveCharacterTextSplitter_0-input-chunkOverlap-number"
},
{
"label": "Custom Separators",
"name": "separators",
"type": "string",
"rows": 4,
"description": "Array of custom separators to determine when to split the text, will override the default separators",
"placeholder": "[\"|\", \"##\", \">\", \"-\"]",
"additionalParams": true,
"optional": true,
"id": "recursiveCharacterTextSplitter_0-input-separators-string"
}
],
"inputAnchors": [],
"inputs": {
"chunkSize": 1000,
"chunkOverlap": "",
"separators": ""
},
"outputAnchors": [
{
"id": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable",
"name": "recursiveCharacterTextSplitter",
"label": "RecursiveCharacterTextSplitter",
"type": "RecursiveCharacterTextSplitter | TextSplitter | BaseDocumentTransformer | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": -203.4868320229876,
"y": 101.32475976329766
},
"dragging": false
},
{
"width": 300,
"height": 334,
"id": "openAIEmbedding_LlamaIndex_0",
"position": {
"x": 176.27434578083106,
"y": 953.3664298122493
},
"type": "customNode",
"data": {
"id": "openAIEmbedding_LlamaIndex_0",
"label": "OpenAI Embedding",
"version": 1,
"name": "openAIEmbedding_LlamaIndex",
"type": "OpenAIEmbedding",
"baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"],
"tags": ["LlamaIndex"],
"category": "Embeddings",
"description": "OpenAI Embedding specific for LlamaIndex",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "openAIEmbedding_LlamaIndex_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbedding_LlamaIndex_0-input-modelName-options"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbedding_LlamaIndex_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "openAIEmbedding_LlamaIndex_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputs": {
"timeout": "",
"basepath": ""
},
"outputAnchors": [
{
"id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"name": "openAIEmbedding_LlamaIndex",
"label": "OpenAIEmbedding",
"type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 176.27434578083106,
"y": 953.3664298122493
},
"dragging": false
},
{
"width": 300,
"height": 585,
"id": "pineconeLlamaIndex_0",
"position": {
"x": 609.3087433345761,
"y": 488.2141798951578
},
"type": "customNode",
"data": {
"id": "pineconeLlamaIndex_0",
"label": "Pinecone",
"version": 1,
"name": "pineconeLlamaIndex",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorIndexRetriever"],
"tags": ["LlamaIndex"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["pineconeApi"],
"id": "pineconeLlamaIndex_0-input-credential-credential"
},
{
"label": "Pinecone Index",
"name": "pineconeIndex",
"type": "string",
"id": "pineconeLlamaIndex_0-input-pineconeIndex-string"
},
{
"label": "Pinecone Namespace",
"name": "pineconeNamespace",
"type": "string",
"placeholder": "my-first-namespace",
"additionalParams": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-pineconeNamespace-string"
},
{
"label": "Pinecone Metadata Filter",
"name": "pineconeMetadataFilter",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json"
},
{
"label": "Top K",
"name": "topK",
"description": "Number of top results to fetch. Default to 4",
"placeholder": "4",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-topK-number"
}
],
"inputAnchors": [
{
"label": "Document",
"name": "document",
"type": "Document",
"list": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-document-Document"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Embeddings",
"name": "embeddings",
"type": "BaseEmbedding_LlamaIndex",
"id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
}
],
"inputs": {
"document": ["{{textFile_0.data.instance}}"],
"model": "{{chatOpenAI_LlamaIndex_1.data.instance}}",
"embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}",
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever",
"name": "retriever",
"label": "Pinecone Retriever",
"type": "Pinecone | VectorIndexRetriever"
},
{
"id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex",
"name": "vectorStore",
"label": "Pinecone Vector Store Index",
"type": "Pinecone | VectorStoreIndex"
}
],
"default": "retriever"
}
],
"outputs": {
"output": "retriever"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 609.3087433345761,
"y": 488.2141798951578
},
"dragging": false
},
{
"width": 300,
"height": 529,
"id": "chatOpenAI_LlamaIndex_1",
"position": {
"x": -195.15244974578656,
"y": 584.9467028201428
},
"type": "customNode",
"data": {
"id": "chatOpenAI_LlamaIndex_1",
"label": "ChatOpenAI",
"version": 1,
"name": "chatOpenAI_LlamaIndex",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"],
"tags": ["LlamaIndex"],
"category": "Chat Models",
"description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_LlamaIndex_1-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-4-32k-0613",
"name": "gpt-4-32k-0613"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
},
{
"label": "gpt-3.5-turbo-16k-0613",
"name": "gpt-3.5-turbo-16k-0613"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_LlamaIndex_1-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_LlamaIndex_1-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_1-input-topP-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_1-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
"name": "chatOpenAI_LlamaIndex",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel_LlamaIndex"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": -195.15244974578656,
"y": 584.9467028201428
},
"dragging": false
},
{
"width": 300,
"height": 513,
"id": "contextChatEngine_0",
"position": {
"x": 1550.2553933740128,
"y": 270.7914631777829
},
"type": "customNode",
"data": {
"id": "contextChatEngine_0",
"label": "Context Chat Engine",
"version": 1,
"name": "contextChatEngine",
"type": "ContextChatEngine",
"baseClasses": ["ContextChatEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation",
"inputParams": [
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "contextChatEngine_0-input-returnSourceDocuments-boolean"
},
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.",
"id": "contextChatEngine_0-input-systemMessagePrompt-string"
}
],
"inputAnchors": [
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Vector Store Retriever",
"name": "vectorStoreRetriever",
"type": "VectorIndexRetriever",
"id": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseChatMemory",
"id": "contextChatEngine_0-input-memory-BaseChatMemory"
}
],
"inputs": {
"model": "{{chatOpenAI_LlamaIndex_2.data.instance}}",
"vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}",
"memory": "{{RedisBackedChatMemory_0.data.instance}}",
"systemMessagePrompt": "",
"returnSourceDocuments": true
},
"outputAnchors": [
{
"id": "contextChatEngine_0-output-contextChatEngine-ContextChatEngine",
"name": "contextChatEngine",
"label": "ContextChatEngine",
"type": "ContextChatEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1550.2553933740128,
"y": 270.7914631777829
},
"dragging": false
},
{
"width": 300,
"height": 329,
"id": "RedisBackedChatMemory_0",
"position": {
"x": 1081.252815805786,
"y": 990.1701092562037
},
"type": "customNode",
"data": {
"id": "RedisBackedChatMemory_0",
"label": "Redis-Backed Chat Memory",
"version": 2,
"name": "RedisBackedChatMemory",
"type": "RedisBackedChatMemory",
"baseClasses": ["RedisBackedChatMemory", "BaseChatMemory", "BaseMemory"],
"category": "Memory",
"description": "Summarizes the conversation and stores the memory in Redis server",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"optional": true,
"credentialNames": ["redisCacheApi", "redisCacheUrlApi"],
"id": "RedisBackedChatMemory_0-input-credential-credential"
},
{
"label": "Session Id",
"name": "sessionId",
"type": "string",
"description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat\">more</a>",
"default": "",
"additionalParams": true,
"optional": true,
"id": "RedisBackedChatMemory_0-input-sessionId-string"
},
{
"label": "Session Timeouts",
"name": "sessionTTL",
"type": "number",
"description": "Omit this parameter to make sessions never expire",
"additionalParams": true,
"optional": true,
"id": "RedisBackedChatMemory_0-input-sessionTTL-number"
},
{
"label": "Memory Key",
"name": "memoryKey",
"type": "string",
"default": "chat_history",
"additionalParams": true,
"id": "RedisBackedChatMemory_0-input-memoryKey-string"
}
],
"inputAnchors": [],
"inputs": {
"sessionId": "",
"sessionTTL": "",
"memoryKey": "chat_history"
},
"outputAnchors": [
{
"id": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
"name": "RedisBackedChatMemory",
"label": "RedisBackedChatMemory",
"type": "RedisBackedChatMemory | BaseChatMemory | BaseMemory"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 1081.252815805786,
"y": 990.1701092562037
}
},
{
"width": 300,
"height": 529,
"id": "chatOpenAI_LlamaIndex_2",
"position": {
"x": 1015.1605888108386,
"y": -38.31143117572401
},
"type": "customNode",
"data": {
"id": "chatOpenAI_LlamaIndex_2",
"label": "ChatOpenAI",
"version": 1,
"name": "chatOpenAI_LlamaIndex",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"],
"tags": ["LlamaIndex"],
"category": "Chat Models",
"description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_LlamaIndex_2-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-4-32k-0613",
"name": "gpt-4-32k-0613"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
},
{
"label": "gpt-3.5-turbo-16k-0613",
"name": "gpt-3.5-turbo-16k-0613"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_LlamaIndex_2-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_LlamaIndex_2-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_2-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_2-input-topP-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_2-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
"name": "chatOpenAI_LlamaIndex",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel_LlamaIndex"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1015.1605888108386,
"y": -38.31143117572401
},
"dragging": false
}
],
"edges": [
{
"source": "recursiveCharacterTextSplitter_0",
"sourceHandle": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable",
"target": "textFile_0",
"targetHandle": "textFile_0-input-textSplitter-TextSplitter",
"type": "buttonedge",
"id": "recursiveCharacterTextSplitter_0-recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable-textFile_0-textFile_0-input-textSplitter-TextSplitter",
"data": {
"label": ""
}
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document|json-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_LlamaIndex_1",
"sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"data": {
"label": ""
}
},
{
"source": "openAIEmbedding_LlamaIndex_0",
"sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"type": "buttonedge",
"id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"data": {
"label": ""
}
},
{
"source": "pineconeLlamaIndex_0",
"sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever",
"target": "contextChatEngine_0",
"targetHandle": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"type": "buttonedge",
"id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-contextChatEngine_0-contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"data": {
"label": ""
}
},
{
"source": "RedisBackedChatMemory_0",
"sourceHandle": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
"target": "contextChatEngine_0",
"targetHandle": "contextChatEngine_0-input-memory-BaseChatMemory",
"type": "buttonedge",
"id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-contextChatEngine_0-contextChatEngine_0-input-memory-BaseChatMemory",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_LlamaIndex_2",
"sourceHandle": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
"target": "contextChatEngine_0",
"targetHandle": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "chatOpenAI_LlamaIndex_2-chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-contextChatEngine_0-contextChatEngine_0-input-model-BaseChatModel_LlamaIndex",
"data": {
"label": ""
}
}
]
}
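
The flow above pairs the Context Chat Engine with a Redis-Backed Chat Memory whose sessionId input keeps conversations apart per user. Once saved, a chatflow like this can be driven through Flowise's prediction endpoint, with the session id supplied per request via overrideConfig. A minimal TypeScript sketch, assuming a local instance; the base URL and chatflow id are placeholders, not values from this diff:

// Minimal sketch: call the chatflow above and keep users' histories apart
// by overriding the Redis memory's sessionId per request.
const BASE_URL = 'http://localhost:3000' // placeholder deployment URL
const CHATFLOW_ID = '<context-chat-engine-flow-id>' // placeholder id

async function chat(sessionId: string, question: string): Promise<unknown> {
    const res = await fetch(`${BASE_URL}/api/v1/prediction/${CHATFLOW_ID}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ question, overrideConfig: { sessionId } })
    })
    if (!res.ok) throw new Error(`Prediction failed: ${res.status}`)
    return res.json()
}

// Same session id -> shared Redis-backed history; a new id starts fresh.
chat('user-42', 'What does the uploaded text file cover?').then(console.log)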

View File

@ -1,5 +1,7 @@
{
"description": "A conversational agent for a chat model which utilize chat specific prompts",
"categories": "Calculator Tool,Buffer Memory,SerpAPI,ChatOpenAI,Conversational Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,6 +1,8 @@
{
"description": "Agent optimized for vector retrieval during conversation and answering questions based on previous dialogue.",
"categories": "Retriever Tool,Buffer Memory,ChatOpenAI,Conversational Retrieval Agent, Pinecone,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,6 +1,8 @@
{
"description": "Text file QnA using conversational retrieval QA chain",
"categories": "TextFile,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -233,10 +235,10 @@
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document",
"id": "textFile_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "textFile_0-output-text-string|json",
@ -730,11 +732,11 @@
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "pinecone_0",
"targetHandle": "pinecone_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document-pinecone_0-pinecone_0-input-document-Document",
"id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document",
"data": {
"label": ""
}
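
The rename above is mechanical but far-reaching: the text file loader's document output now advertises the base classes Document and json, and because the pipe-separated class list is embedded in every handle id, and handle ids in turn in every edge id, each downstream id must change in lockstep. The helpers below sketch that convention; the function names are hypothetical, only the id format is taken from the diff:

// Sketch of the id convention visible in these hunks (helper names invented).
const outputHandle = (nodeId: string, name: string, baseClasses: string[]) =>
    `${nodeId}-output-${name}-${baseClasses.join('|')}`

const inputHandle = (nodeId: string, name: string, type: string) =>
    `${nodeId}-input-${name}-${type}`

const edgeId = (source: string, sourceHandle: string, target: string, targetHandle: string) =>
    `${source}-${sourceHandle}-${target}-${targetHandle}`

// outputHandle('textFile_0', 'document', ['Document', 'json'])
//   -> 'textFile_0-output-document-Document|json', matching the new ids above.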

View File

@ -1,6 +1,8 @@
{
"description": "Flowise Docs Github QnA using conversational retrieval QA chain",
"categories": "Memory Vector Store,Github Loader,ChatOpenAI,Conversational Retrieval QA Chain,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Simple LLM Chain using HuggingFace Inference API on falcon-7b-instruct model",
"categories": "HuggingFace,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Split flows based on if else condition",
"categories": "IfElse Function,ChatOpenAI,OpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"badge": "new",
"nodes": [
{

View File

@ -1,6 +1,8 @@
{
"description": "Generate image using Replicate Stability text-to-image generative AI model",
"badge": "NEW",
"categories": "Replicate,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,6 +1,8 @@
{
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"badge": "NEW",
"categories": "Moderation,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,6 +1,8 @@
{
"description": "Return response as a list (array) instead of a string/text",
"badge": "NEW",
"categories": "CSV Output Parser,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,6 +1,8 @@
{
"description": "QnA chain using Ollama local LLM, LocalAI embedding model, and Faiss local vector store",
"badge": "POPULAR",
"categories": "Text File,ChatOllama,Conversational Retrieval QA Chain,Faiss,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -224,10 +226,10 @@
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document",
"id": "textFile_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "textFile_0-output-text-string|json",
@ -649,11 +651,11 @@
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "faiss_0",
"targetHandle": "faiss_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document-faiss_0-faiss_0-input-document-Document",
"id": "textFile_0-textFile_0-output-document-Document|json-faiss_0-faiss_0-input-document-Document",
"data": {
"label": ""
}

View File

@ -1,5 +1,7 @@
{
"description": "Use long term memory like Zep to differentiate conversations between users with sessionId",
"categories": "ChatOpenAI,Conversational Retrieval QA Chain,Zep Memory,Qdrant,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,6 +1,8 @@
{
"description": "Upsert multiple files with metadata and filter by it using conversational retrieval QA chain",
"categories": "Text File,PDF File,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -126,10 +128,10 @@
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document",
"id": "textFile_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "textFile_0-output-text-string|json",
@ -836,11 +838,11 @@
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "pinecone_0",
"targetHandle": "pinecone_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document-pinecone_0-pinecone_0-input-document-Document",
"id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document",
"data": {
"label": ""
}

View File

@ -1,5 +1,7 @@
{
"description": "A chain that automatically picks an appropriate prompt from multiple prompts",
"categories": "ChatOpenAI,Multi Prompt Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "A chain that automatically picks an appropriate retriever from multiple different vector databases",
"categories": "ChatOpenAI,Multi Retrieval QA Chain,Pinecone,Chroma,Supabase,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Use the agent to choose between multiple different vector databases, with the ability to use other tools",
"categories": "Buffer Memory,ChatOpenAI,Chain Tool,Retrieval QA Chain,Redis,Faiss,Conversational Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -964,10 +966,10 @@
"type": "options",
"options": [
{
"id": "plainText_0-output-document-Document",
"id": "plainText_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "plainText_0-output-text-string|json",
@ -1501,10 +1503,10 @@
"type": "options",
"options": [
{
"id": "plainText_1-output-document-Document",
"id": "plainText_1-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "plainText_1-output-text-string|json",
@ -1721,11 +1723,11 @@
},
{
"source": "plainText_0",
"sourceHandle": "plainText_0-output-document-Document",
"sourceHandle": "plainText_0-output-document-Document|json",
"target": "redis_0",
"targetHandle": "redis_0-input-document-Document",
"type": "buttonedge",
"id": "plainText_0-plainText_0-output-document-Document-redis_0-redis_0-input-document-Document",
"id": "plainText_0-plainText_0-output-document-Document|json-redis_0-redis_0-input-document-Document",
"data": {
"label": ""
}
@ -1776,11 +1778,11 @@
},
{
"source": "plainText_1",
"sourceHandle": "plainText_1-output-document-Document",
"sourceHandle": "plainText_1-output-document-Document|json",
"target": "faiss_0",
"targetHandle": "faiss_0-input-document-Document",
"type": "buttonedge",
"id": "plainText_1-plainText_1-output-document-Document-faiss_0-faiss_0-input-document-Document",
"id": "plainText_1-plainText_1-output-document-Document|json-faiss_0-faiss_0-input-document-Document",
"data": {
"label": ""
}

View File

@ -1,5 +1,7 @@
{
"description": "An agent that uses OpenAI's Function Calling functionality to pick the tool and args to call",
"categories": "Buffer Memory,Custom Tool, SerpAPI,OpenAI Function,Calculator Tool,ChatOpenAI,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "OpenAI Assistant that has instructions and can leverage models, tools, and knowledge to respond to user queries",
"categories": "Custom Tool, SerpAPI,OpenAI Assistant,Calculator Tool,Langchain",
"framework": "Langchain",
"badge": "NEW",
"nodes": [
{

View File

@ -1,6 +1,8 @@
{
"description": "Use chat history to rephrase user question, and answer the rephrased question using retrieved docs from vector store",
"categories": "ChatOpenAI,LLM Chain,SingleStore,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Use output from a chain as prompt for another chain",
"categories": "Custom Tool,OpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -0,0 +1,549 @@
{
"description": "Stateless query engine designed to answer question over your data using LlamaIndex",
"categories": "ChatAnthropic,Compact and Refine,Pinecone,LlamaIndex",
"badge": "NEW",
"framework": "LlamaIndex",
"nodes": [
{
"width": 300,
"height": 382,
"id": "queryEngine_0",
"position": {
"x": 1407.9610494306783,
"y": 241.12144405808692
},
"type": "customNode",
"data": {
"id": "queryEngine_0",
"label": "Query Engine",
"version": 1,
"name": "queryEngine",
"type": "QueryEngine",
"baseClasses": ["QueryEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Simple query engine built to answer question over your data, without memory",
"inputParams": [
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "queryEngine_0-input-returnSourceDocuments-boolean"
}
],
"inputAnchors": [
{
"label": "Vector Store Retriever",
"name": "vectorStoreRetriever",
"type": "VectorIndexRetriever",
"id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
},
{
"label": "Response Synthesizer",
"name": "responseSynthesizer",
"type": "ResponseSynthesizer",
"description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target=\"_blank\" href=\"https://ts.llamaindex.ai/modules/low_level/response_synthesizer\">more</a>",
"optional": true,
"id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer"
}
],
"inputs": {
"vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}",
"responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}",
"returnSourceDocuments": true
},
"outputAnchors": [
{
"id": "queryEngine_0-output-queryEngine-QueryEngine",
"name": "queryEngine",
"label": "QueryEngine",
"type": "QueryEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1407.9610494306783,
"y": 241.12144405808692
},
"dragging": false
},
{
"width": 300,
"height": 585,
"id": "pineconeLlamaIndex_0",
"position": {
"x": 977.3886641397302,
"y": -261.2253031641797
},
"type": "customNode",
"data": {
"id": "pineconeLlamaIndex_0",
"label": "Pinecone",
"version": 1,
"name": "pineconeLlamaIndex",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorIndexRetriever"],
"tags": ["LlamaIndex"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["pineconeApi"],
"id": "pineconeLlamaIndex_0-input-credential-credential"
},
{
"label": "Pinecone Index",
"name": "pineconeIndex",
"type": "string",
"id": "pineconeLlamaIndex_0-input-pineconeIndex-string"
},
{
"label": "Pinecone Namespace",
"name": "pineconeNamespace",
"type": "string",
"placeholder": "my-first-namespace",
"additionalParams": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-pineconeNamespace-string"
},
{
"label": "Pinecone Metadata Filter",
"name": "pineconeMetadataFilter",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json"
},
{
"label": "Top K",
"name": "topK",
"description": "Number of top results to fetch. Default to 4",
"placeholder": "4",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-topK-number"
}
],
"inputAnchors": [
{
"label": "Document",
"name": "document",
"type": "Document",
"list": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-document-Document"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Embeddings",
"name": "embeddings",
"type": "BaseEmbedding_LlamaIndex",
"id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
}
],
"inputs": {
"document": "",
"model": "{{chatAnthropic_LlamaIndex_0.data.instance}}",
"embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}",
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever",
"name": "retriever",
"label": "Pinecone Retriever",
"type": "Pinecone | VectorIndexRetriever"
},
{
"id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex",
"name": "vectorStore",
"label": "Pinecone Vector Store Index",
"type": "Pinecone | VectorStoreIndex"
}
],
"default": "retriever"
}
],
"outputs": {
"output": "retriever"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 977.3886641397302,
"y": -261.2253031641797
},
"dragging": false
},
{
"width": 300,
"height": 334,
"id": "openAIEmbedding_LlamaIndex_0",
"position": {
"x": 529.8690713844503,
"y": -18.955726653613254
},
"type": "customNode",
"data": {
"id": "openAIEmbedding_LlamaIndex_0",
"label": "OpenAI Embedding",
"version": 1,
"name": "openAIEmbedding_LlamaIndex",
"type": "OpenAIEmbedding",
"baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"],
"tags": ["LlamaIndex"],
"category": "Embeddings",
"description": "OpenAI Embedding specific for LlamaIndex",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "openAIEmbedding_LlamaIndex_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbedding_LlamaIndex_0-input-modelName-options"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbedding_LlamaIndex_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "openAIEmbedding_LlamaIndex_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputs": {
"timeout": "",
"basepath": ""
},
"outputAnchors": [
{
"id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"name": "openAIEmbedding_LlamaIndex",
"label": "OpenAIEmbedding",
"type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 529.8690713844503,
"y": -18.955726653613254
},
"dragging": false
},
{
"width": 300,
"height": 749,
"id": "compactrefineLlamaIndex_0",
"position": {
"x": 170.71031618977543,
"y": -33.83233752386292
},
"type": "customNode",
"data": {
"id": "compactrefineLlamaIndex_0",
"label": "Compact and Refine",
"version": 1,
"name": "compactrefineLlamaIndex",
"type": "CompactRefine",
"baseClasses": ["CompactRefine", "ResponseSynthesizer"],
"tags": ["LlamaIndex"],
"category": "Response Synthesizer",
"description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.",
"inputParams": [
{
"label": "Refine Prompt",
"name": "refinePrompt",
"type": "string",
"rows": 4,
"default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:",
"warning": "Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}",
"optional": true,
"id": "compactrefineLlamaIndex_0-input-refinePrompt-string"
},
{
"label": "Text QA Prompt",
"name": "textQAPrompt",
"type": "string",
"rows": 4,
"default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:",
"warning": "Prompt can contains no variables, or up to 2 variables. Variables must be {context} and {query}",
"optional": true,
"id": "compactrefineLlamaIndex_0-input-textQAPrompt-string"
}
],
"inputAnchors": [],
"inputs": {
"refinePrompt": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:",
"textQAPrompt": "Context information:\n<context>\n{context}\n</context>\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}"
},
"outputAnchors": [
{
"id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer",
"name": "compactrefineLlamaIndex",
"label": "CompactRefine",
"type": "CompactRefine | ResponseSynthesizer"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 170.71031618977543,
"y": -33.83233752386292
},
"dragging": false
},
{
"width": 300,
"height": 529,
"id": "chatAnthropic_LlamaIndex_0",
"position": {
"x": 521.3530883359147,
"y": -584.8241219614786
},
"type": "customNode",
"data": {
"id": "chatAnthropic_LlamaIndex_0",
"label": "ChatAnthropic",
"version": 1,
"name": "chatAnthropic_LlamaIndex",
"type": "ChatAnthropic",
"baseClasses": ["ChatAnthropic", "BaseChatModel_LlamaIndex"],
"tags": ["LlamaIndex"],
"category": "Chat Models",
"description": "Wrapper around ChatAnthropic LLM specific for LlamaIndex",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["anthropicApi"],
"id": "chatAnthropic_LlamaIndex_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "claude-2",
"name": "claude-2",
"description": "Claude 2 latest major version, automatically get updates to the model as they are released"
},
{
"label": "claude-2.1",
"name": "claude-2.1",
"description": "Claude 2 latest full version"
},
{
"label": "claude-instant-1",
"name": "claude-instant-1",
"description": "Claude Instant latest major version, automatically get updates to the model as they are released"
},
{
"label": "claude-v1",
"name": "claude-v1"
},
{
"label": "claude-v1-100k",
"name": "claude-v1-100k"
},
{
"label": "claude-v1.0",
"name": "claude-v1.0"
},
{
"label": "claude-v1.2",
"name": "claude-v1.2"
},
{
"label": "claude-v1.3",
"name": "claude-v1.3"
},
{
"label": "claude-v1.3-100k",
"name": "claude-v1.3-100k"
},
{
"label": "claude-instant-v1",
"name": "claude-instant-v1"
},
{
"label": "claude-instant-v1-100k",
"name": "claude-instant-v1-100k"
},
{
"label": "claude-instant-v1.0",
"name": "claude-instant-v1.0"
},
{
"label": "claude-instant-v1.1",
"name": "claude-instant-v1.1"
},
{
"label": "claude-instant-v1.1-100k",
"name": "claude-instant-v1.1-100k"
}
],
"default": "claude-2",
"optional": true,
"id": "chatAnthropic_LlamaIndex_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatAnthropic_LlamaIndex_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokensToSample",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_LlamaIndex_0-input-maxTokensToSample-number"
},
{
"label": "Top P",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_LlamaIndex_0-input-topP-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "claude-2",
"temperature": 0.9,
"maxTokensToSample": "",
"topP": ""
},
"outputAnchors": [
{
"id": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex",
"name": "chatAnthropic_LlamaIndex",
"label": "ChatAnthropic",
"type": "ChatAnthropic | BaseChatModel_LlamaIndex"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 521.3530883359147,
"y": -584.8241219614786
},
"dragging": false
}
],
"edges": [
{
"source": "pineconeLlamaIndex_0",
"sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever",
"target": "queryEngine_0",
"targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"type": "buttonedge",
"id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"data": {
"label": ""
}
},
{
"source": "openAIEmbedding_LlamaIndex_0",
"sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"type": "buttonedge",
"id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"data": {
"label": ""
}
},
{
"source": "compactrefineLlamaIndex_0",
"sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer",
"target": "queryEngine_0",
"targetHandle": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
"type": "buttonedge",
"id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-queryEngine_0-queryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
"data": {
"label": ""
}
},
{
"source": "chatAnthropic_LlamaIndex_0",
"sourceHandle": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "chatAnthropic_LlamaIndex_0-chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"data": {
"label": ""
}
}
]
}
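
Note that this query engine template ships with pineconeIndex, pineconeNamespace and topK left blank, and its CompactRefine prompts accept only the {context}, {query} and {existingAnswer} variables named in the warnings above. The blank Pinecone settings can be filled in per request through overrideConfig, keyed by the node input names; a hedged TypeScript sketch with placeholder values:

// Sketch: supply the template's blank Pinecone settings at prediction time.
// overrideConfig keys mirror the node input names in the JSON above; the
// index, namespace, URL and chatflow id are placeholders.
const BASE_URL = 'http://localhost:3000'
const CHATFLOW_ID = '<query-engine-flow-id>'

async function query(question: string): Promise<unknown> {
    const res = await fetch(`${BASE_URL}/api/v1/prediction/${CHATFLOW_ID}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            question,
            overrideConfig: {
                pineconeIndex: 'my-index',
                pineconeNamespace: 'my-first-namespace',
                topK: 4
            }
        })
    })
    return res.json()
}

query('Summarize the indexed documents.').then(console.log)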

View File

@ -1,5 +1,7 @@
{
"description": "An agent that uses ReAct logic to decide what action to take",
"categories": "Calculator Tool,SerpAPI,ChatOpenAI,MRKL Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Use Replicate API that runs Llama 13b v2 model with LLMChain",
"categories": "Replicate,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Answer questions over a SQL database",
"categories": "ChatOpenAI,Sql Database Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Manually construct prompts to query a SQL database",
"categories": "IfElse Function,Variable Set/Get,Custom JS Function,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"badge": "new",
"nodes": [
{

View File

@ -0,0 +1,272 @@
{
"description": "Simple chat engine to handle back and forth conversations using LlamaIndex",
"categories": "BufferMemory,AzureChatOpenAI,LlamaIndex",
"framework": "LlamaIndex",
"badge": "NEW",
"nodes": [
{
"width": 300,
"height": 462,
"id": "simpleChatEngine_0",
"position": {
"x": 1210.127368000538,
"y": 324.98110560103896
},
"type": "customNode",
"data": {
"id": "simpleChatEngine_0",
"label": "Simple Chat Engine",
"version": 1,
"name": "simpleChatEngine",
"type": "SimpleChatEngine",
"baseClasses": ["SimpleChatEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Simple engine to handle back and forth conversations",
"inputParams": [
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"optional": true,
"placeholder": "You are a helpful assistant",
"id": "simpleChatEngine_0-input-systemMessagePrompt-string"
}
],
"inputAnchors": [
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseChatMemory",
"id": "simpleChatEngine_0-input-memory-BaseChatMemory"
}
],
"inputs": {
"model": "{{azureChatOpenAI_LlamaIndex_0.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}",
"systemMessagePrompt": "You are a helpful assistant."
},
"outputAnchors": [
{
"id": "simpleChatEngine_0-output-simpleChatEngine-SimpleChatEngine",
"name": "simpleChatEngine",
"label": "SimpleChatEngine",
"type": "SimpleChatEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 1210.127368000538,
"y": 324.98110560103896
}
},
{
"width": 300,
"height": 376,
"id": "bufferMemory_0",
"position": {
"x": 393.9823478014782,
"y": 415.7414943210391
},
"type": "customNode",
"data": {
"id": "bufferMemory_0",
"label": "Buffer Memory",
"version": 1,
"name": "bufferMemory",
"type": "BufferMemory",
"baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"],
"category": "Memory",
"description": "Remembers previous conversational back and forths directly",
"inputParams": [
{
"label": "Memory Key",
"name": "memoryKey",
"type": "string",
"default": "chat_history",
"id": "bufferMemory_0-input-memoryKey-string"
},
{
"label": "Input Key",
"name": "inputKey",
"type": "string",
"default": "input",
"id": "bufferMemory_0-input-inputKey-string"
}
],
"inputAnchors": [],
"inputs": {
"memoryKey": "chat_history",
"inputKey": "input"
},
"outputAnchors": [
{
"id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"name": "bufferMemory",
"label": "BufferMemory",
"type": "BufferMemory | BaseChatMemory | BaseMemory"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 393.9823478014782,
"y": 415.7414943210391
},
"dragging": false
},
{
"width": 300,
"height": 529,
"id": "azureChatOpenAI_LlamaIndex_0",
"position": {
"x": 746.5530862509605,
"y": -54.107978373323306
},
"type": "customNode",
"data": {
"id": "azureChatOpenAI_LlamaIndex_0",
"label": "AzureChatOpenAI",
"version": 1,
"name": "azureChatOpenAI_LlamaIndex",
"type": "AzureChatOpenAI",
"baseClasses": ["AzureChatOpenAI", "BaseChatModel_LlamaIndex"],
"tags": ["LlamaIndex"],
"category": "Chat Models",
"description": "Wrapper around Azure OpenAI Chat LLM specific for LlamaIndex",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["azureOpenAIApi"],
"id": "azureChatOpenAI_LlamaIndex_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
}
],
"default": "gpt-3.5-turbo-16k",
"optional": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-topP-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex",
"name": "azureChatOpenAI_LlamaIndex",
"label": "AzureChatOpenAI",
"type": "AzureChatOpenAI | BaseChatModel_LlamaIndex"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 746.5530862509605,
"y": -54.107978373323306
},
"dragging": false
}
],
"edges": [
{
"source": "bufferMemory_0",
"sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"target": "simpleChatEngine_0",
"targetHandle": "simpleChatEngine_0-input-memory-BaseChatMemory",
"type": "buttonedge",
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-simpleChatEngine_0-simpleChatEngine_0-input-memory-BaseChatMemory",
"data": {
"label": ""
}
},
{
"source": "azureChatOpenAI_LlamaIndex_0",
"sourceHandle": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex",
"target": "simpleChatEngine_0",
"targetHandle": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "azureChatOpenAI_LlamaIndex_0-azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex-simpleChatEngine_0-simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex",
"data": {
"label": ""
}
}
]
}
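
This template is the simplest of the three LlamaIndex flows in the commit: a chat model, an in-process Buffer Memory (which, unlike the Redis-backed memory earlier, declares no sessionId input) and the Simple Chat Engine. A single round trip through the prediction endpoint is enough to talk to it; a minimal sketch with placeholder values, where the text key on the response is an assumption about the Flowise response shape:

// Minimal sketch: hold a back-and-forth with the simple chat engine above.
const BASE_URL = 'http://localhost:3000' // placeholder
const CHATFLOW_ID = '<simple-chat-engine-flow-id>' // placeholder

async function ask(question: string): Promise<string> {
    const res = await fetch(`${BASE_URL}/api/v1/prediction/${CHATFLOW_ID}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ question })
    })
    const data = await res.json()
    return data.text // assumed response field carrying the generated answer
}

ask('Hello! Who are you?').then(console.log)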

View File

@ -1,5 +1,7 @@
{
"description": "Basic example of Conversation Chain with built-in memory - works exactly like ChatGPT",
"categories": "Buffer Memory,ChatOpenAI,Conversation Chain,Langchain",
"framework": "Langchain",
"badge": "POPULAR",
"nodes": [
{

View File

@ -1,5 +1,7 @@
{
"description": "Basic example of stateless (no memory) LLM Chain with a Prompt Template and LLM Model",
"categories": "OpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Return response as a specified JSON structure instead of a string/text",
"categories": "Structured Output Parser,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"badge": "NEW",
"nodes": [
{

File diff suppressed because it is too large

View File

@ -1,5 +1,7 @@
{
"description": "Language translation using LLM Chain with a Chat Prompt Template and Chat Model",
"categories": "Chat Prompt Template,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

Some files were not shown because too many files have changed in this diff