Chore/update deprecating nodes (#2540)

* update deprecating nodes

* add filters use cases to marketplace

* update log level
pull/2804/head
Henry Heng 2024-07-12 18:37:57 +01:00 committed by GitHub
parent 9ea439d135
commit 363d1bfc44
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
144 changed files with 6939 additions and 12160 deletions

View File

@ -26,7 +26,7 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# DISABLE_CHATFLOW_REUSE=true
# DEBUG=true
# LOG_LEVEL=debug (error | warn | info | verbose | debug)
# LOG_LEVEL=info (error | warn | info | verbose | debug)
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash

View File

@ -1,187 +0,0 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI, formatToOpenAIFunction } from '@langchain/openai'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
// Default system prompt used when the user does not supply a System Message input
const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`
/**
 * Flowise node: Conversational Retrieval Agent.
 * Wraps a LangChain AgentExecutor driven by OpenAI Function Calling, optimized
 * for answering questions that depend on past dialogue (chat memory).
 * The `DEPRECATING` badge flags this node for eventual removal.
 */
class ConversationalRetrievalAgent_Agents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    // UI badge describing the node's lifecycle state
    badge?: string
    // Conversation session identifier; scopes reads/writes of chat memory
    sessionId?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'Conversational Retrieval Agent'
        this.name = 'conversationalRetrievalAgent'
        this.version = 4.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.badge = 'DEPRECATING'
        this.icon = 'agent.svg'
        this.description = `An agent optimized for retrieval during conversation, answering questions based on past dialogue, all using OpenAI's Function Calling`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        // Node input anchors/params rendered by the Flowise UI
        this.inputs = [
            {
                label: 'Allowed Tools',
                name: 'tools',
                type: 'Tool',
                list: true
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'OpenAI/Azure Chat Model',
                name: 'model',
                type: 'BaseChatModel'
            },
            {
                label: 'System Message',
                name: 'systemMessage',
                type: 'string',
                default: defaultMessage,
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            },
            {
                label: 'Max Iterations',
                name: 'maxIterations',
                type: 'number',
                optional: true,
                additionalParams: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    /** Builds and returns the AgentExecutor without executing it. */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
    }

    /**
     * Runs the agent for one user input: applies input moderation (if any),
     * invokes the executor with streaming (socket.IO) or plain callbacks,
     * persists the user/assistant exchange to chat memory, and returns the
     * agent's text output.
     */
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the agent
                input = await checkInputs(moderations, input)
            } catch (e) {
                // Brief pause before returning the moderation error to the caller
                // (presumably so a streaming client can settle — confirm)
                await new Promise((resolve) => setTimeout(resolve, 500))
                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }
        const executor = prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
        const loggerHandler = new ConsoleCallbackHandler(options.logger)
        const callbacks = await additionalCallbacks(nodeData, options)
        let res: ChainValues = {}
        // When a socket.IO client is attached, add the streaming handler so
        // tokens are pushed to the client as they are generated
        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
        } else {
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
        }
        // Persist the exchange so subsequent turns see it in chat history
        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: res?.output,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )
        return res?.output
    }
}
/**
 * Assembles the runnable agent pipeline (prompt -> function-bound model ->
 * OpenAI functions output parser) and wraps it in a Flowise AgentExecutor.
 */
const prepareAgent = (nodeData: INodeData, options: ICommonObject, flowObj: { sessionId?: string; chatId?: string; input?: string }) => {
    const llm = nodeData.inputs?.model as ChatOpenAI
    const chatMemory = nodeData.inputs?.memory as FlowiseMemory
    const systemMessage = nodeData.inputs?.systemMessage as string
    const maxIterations = nodeData.inputs?.maxIterations as string
    const agentTools = flatten(nodeData.inputs?.tools)

    // Respect custom keys configured on the memory node; fall back to LangChain defaults
    const historyKey = chatMemory.memoryKey || 'chat_history'
    const questionKey = chatMemory.inputKey || 'input'
    const prependMessages = options?.prependMessages

    // NOTE: the system message is injected with the 'ai' role here (the other
    // agent nodes use 'system')
    const prompt = ChatPromptTemplate.fromMessages([
        ['ai', systemMessage || defaultMessage],
        new MessagesPlaceholder(historyKey),
        ['human', `{${questionKey}}`],
        new MessagesPlaceholder('agent_scratchpad')
    ])

    // Advertise every tool to the model as an OpenAI function definition
    const boundModel = llm.bind({
        functions: agentTools.map((tool: any) => formatToOpenAIFunction(tool))
    })

    const runnableAgent = RunnableSequence.from([
        {
            [questionKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
            agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
            [historyKey]: async (_: { input: string; steps: AgentStep[] }) => {
                const pastMessages = (await chatMemory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
                return pastMessages ?? []
            }
        },
        prompt,
        boundModel,
        new OpenAIFunctionsAgentOutputParser()
    ])

    return AgentExecutor.fromAgentAndTools({
        agent: runnableAgent,
        tools: agentTools,
        sessionId: flowObj?.sessionId,
        chatId: flowObj?.chatId,
        input: flowObj?.input,
        returnIntermediateSteps: true,
        verbose: process.env.DEBUG === 'true',
        maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
    })
}
module.exports = { nodeClass: ConversationalRetrievalAgent_Agents }

View File

@ -1,7 +0,0 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10 6C10 5.44772 10.4477 5 11 5H21C21.5523 5 22 5.44772 22 6V11C22 13.2091 20.2091 15 18 15H14C11.7909 15 10 13.2091 10 11V6Z" stroke="black" stroke-width="2" stroke-linejoin="round"/>
<path d="M16 5V3" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<circle cx="14" cy="9" r="1.5" fill="black"/>
<circle cx="18" cy="9" r="1.5" fill="black"/>
<path d="M26 27C26 22.0294 21.5228 18 16 18C10.4772 18 6 22.0294 6 27" stroke="black" stroke-width="2" stroke-linecap="round"/>
</svg>

Before

Width:  |  Height:  |  Size: 616 B

View File

@ -1 +0,0 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M5 6H4v19.5h1m8-7.5v3h1m7-11.5V6h1m-5 7.5V10h1" stroke="#000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/><mask id="MistralAI__a" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="5" y="6" width="22" height="20"><path d="M5 6v19.5h5v-8h4V21h4v-3.5h4V25h5V6h-4.5v4H18v3.5h-4v-4h-4V6H5Z" fill="#FD7000"/></mask><g mask="url(#MistralAI__a)"><path fill="#FFCD00" d="M4 6h25v4H4z"/></g><mask id="MistralAI__b" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="5" y="6" width="22" height="20"><path d="M5 6v19.5h5v-8h4V21h4v-3.5h4V25h5V6h-4.5v4H18v3.5h-4v-4h-4V6H5Z" fill="#FD7000"/></mask><g mask="url(#MistralAI__b)"><path fill="#FFA200" d="M4 10h25v4H4z"/></g><mask id="MistralAI__c" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="5" y="6" width="22" height="20"><path d="M5 6v19.5h5v-8h4V21h4v-3.5h4V25h5V6h-4.5v4H18v3.5h-4v-4h-4V6H5Z" fill="#FD7000"/></mask><g mask="url(#MistralAI__c)"><path fill="#FF6E00" d="M4 14h25v4H4z"/></g><mask id="MistralAI__d" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="5" y="6" width="22" height="20"><path d="M5 6v19.5h5v-8h4V21h4v-3.5h4V25h5V6h-4.5v4H18v3.5h-4v-4h-4V6H5Z" fill="#FD7000"/></mask><g mask="url(#MistralAI__d)"><path fill="#FF4A09" d="M4 18h25v4H4z"/></g><mask id="MistralAI__e" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="5" y="6" width="22" height="20"><path d="M5 6v19.5h5v-8h4V21h4v-3.5h4V25h5V6h-4.5v4H18v3.5h-4v-4h-4V6H5Z" fill="#FD7000"/></mask><g mask="url(#MistralAI__e)"><path fill="#FE060F" d="M4 22h25v4H4z"/></g><path d="M21 18v7h1" stroke="#000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/><path d="M5 6v19.5h5v-8h4V21h4v-3.5h4V25h5V6h-4.5v4H18v3.5h-4v-4h-4V6H5Z" stroke="#000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>

Before

Width:  |  Height:  |  Size: 1.8 KiB

View File

@ -1,213 +0,0 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI } from '@langchain/openai'
import { convertToOpenAITool } from '@langchain/core/utils/function_calling'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { OpenAIToolsAgentOutputParser } from 'langchain/agents/openai/output_parser'
import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
/**
 * Flowise node: MistralAI Tool Agent.
 * Uses MistralAI's OpenAI-compatible tool/function calling to let the model
 * choose which tools to invoke and with which arguments.
 * The `DEPRECATING` badge flags this node for eventual removal.
 */
class MistralAIToolAgent_Agents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    // Conversation session identifier; scopes reads/writes of chat memory
    sessionId?: string
    // UI badge describing the node's lifecycle state
    badge?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'MistralAI Tool Agent'
        this.name = 'mistralAIToolAgent'
        this.version = 1.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'MistralAI.svg'
        this.badge = 'DEPRECATING'
        this.description = `Agent that uses MistralAI Function Calling to pick the tools and args to call`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        // Node input anchors/params rendered by the Flowise UI
        this.inputs = [
            {
                label: 'Tools',
                name: 'tools',
                type: 'Tool',
                list: true
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'MistralAI Chat Model',
                name: 'model',
                type: 'BaseChatModel'
            },
            {
                label: 'System Message',
                name: 'systemMessage',
                type: 'string',
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            },
            {
                label: 'Max Iterations',
                name: 'maxIterations',
                type: 'number',
                optional: true,
                additionalParams: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    /** Builds and returns the AgentExecutor without executing it. */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
    }

    /**
     * Runs the agent for one user input: applies input moderation (if any),
     * invokes the executor with streaming (socket.IO) or plain callbacks,
     * persists the exchange to chat memory, and returns either the plain text
     * output or an object carrying text plus sourceDocuments/usedTools.
     */
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the agent
                input = await checkInputs(moderations, input)
            } catch (e) {
                // Brief pause, then stream the moderation error to the client
                // before returning it
                await new Promise((resolve) => setTimeout(resolve, 500))
                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }
        const executor = prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
        const loggerHandler = new ConsoleCallbackHandler(options.logger)
        const callbacks = await additionalCallbacks(nodeData, options)
        let res: ChainValues = {}
        let sourceDocuments: ICommonObject[] = []
        let usedTools: IUsedTool[] = []
        // When a socket.IO client is attached, stream tokens and emit
        // source documents / used tools to the client as they appear
        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
            if (res.sourceDocuments) {
                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
                usedTools = res.usedTools
            }
        } else {
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
            if (res.sourceDocuments) {
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                usedTools = res.usedTools
            }
        }
        // Persist the exchange so subsequent turns see it in chat history
        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: res?.output,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )
        // Return a structured object only when there is extra metadata to carry
        let finalRes = res?.output
        if (sourceDocuments.length || usedTools.length) {
            finalRes = { text: res?.output }
            if (sourceDocuments.length) {
                finalRes.sourceDocuments = flatten(sourceDocuments)
            }
            if (usedTools.length) {
                finalRes.usedTools = usedTools
            }
            return finalRes
        }
        return finalRes
    }
}
/**
 * Assembles the runnable agent pipeline (prompt -> tool-bound model ->
 * OpenAI tools output parser) and wraps it in a Flowise AgentExecutor.
 */
const prepareAgent = (nodeData: INodeData, options: ICommonObject, flowObj: { sessionId?: string; chatId?: string; input?: string }) => {
    const llm = nodeData.inputs?.model as ChatOpenAI
    const chatMemory = nodeData.inputs?.memory as FlowiseMemory
    const maxIterations = nodeData.inputs?.maxIterations as string
    const systemMessage = nodeData.inputs?.systemMessage as string
    const agentTools = flatten(nodeData.inputs?.tools)

    // Respect custom keys configured on the memory node; fall back to LangChain defaults
    const historyKey = chatMemory.memoryKey || 'chat_history'
    const questionKey = chatMemory.inputKey || 'input'
    const prependMessages = options?.prependMessages

    const prompt = ChatPromptTemplate.fromMessages([
        ['system', systemMessage || `You are a helpful AI assistant.`],
        new MessagesPlaceholder(historyKey),
        ['human', `{${questionKey}}`],
        new MessagesPlaceholder('agent_scratchpad')
    ])

    // Advertise every tool to the model in the OpenAI tool schema
    const boundModel = llm.bind({
        tools: agentTools.map(convertToOpenAITool)
    })

    const runnableAgent = RunnableSequence.from([
        {
            [questionKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
            agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
            [historyKey]: async (_: { input: string; steps: AgentStep[] }) => {
                const pastMessages = (await chatMemory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
                return pastMessages ?? []
            }
        },
        prompt,
        boundModel,
        new OpenAIToolsAgentOutputParser()
    ])

    return AgentExecutor.fromAgentAndTools({
        agent: runnableAgent,
        tools: agentTools,
        sessionId: flowObj?.sessionId,
        chatId: flowObj?.chatId,
        input: flowObj?.input,
        verbose: process.env.DEBUG === 'true',
        maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
    })
}
module.exports = { nodeClass: MistralAIToolAgent_Agents }

View File

@ -1,212 +0,0 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI, formatToOpenAIFunction } from '@langchain/openai'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
import { Moderation, checkInputs } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
/**
 * Flowise node: OpenAI Function Agent.
 * Uses OpenAI Function Calling to let the model choose a tool and its
 * arguments on each step.
 * The `DEPRECATING` badge flags this node for eventual removal.
 */
class OpenAIFunctionAgent_Agents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    // UI badge describing the node's lifecycle state
    badge?: string
    // Conversation session identifier; scopes reads/writes of chat memory
    sessionId?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'OpenAI Function Agent'
        this.name = 'openAIFunctionAgent'
        this.version = 4.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'function.svg'
        this.description = `An agent that uses OpenAI Function Calling to pick the tool and args to call`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        this.badge = 'DEPRECATING'
        // Node input anchors/params rendered by the Flowise UI
        this.inputs = [
            {
                label: 'Allowed Tools',
                name: 'tools',
                type: 'Tool',
                list: true
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'OpenAI/Azure Chat Model',
                name: 'model',
                type: 'BaseChatModel'
            },
            {
                label: 'System Message',
                name: 'systemMessage',
                type: 'string',
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            },
            {
                label: 'Max Iterations',
                name: 'maxIterations',
                type: 'number',
                optional: true,
                additionalParams: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    /** Builds and returns the AgentExecutor without executing it. */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
    }

    /**
     * Runs the agent for one user input: applies input moderation (if any),
     * invokes the executor with streaming (socket.IO) or plain callbacks,
     * persists the exchange to chat memory, and returns either the plain text
     * output or an object carrying text plus sourceDocuments/usedTools.
     */
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the OpenAI Function Agent
                input = await checkInputs(moderations, input)
            } catch (e) {
                // Brief pause before returning the moderation error to the caller
                // (presumably so a streaming client can settle — confirm)
                await new Promise((resolve) => setTimeout(resolve, 500))
                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }
        const executor = prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
        const loggerHandler = new ConsoleCallbackHandler(options.logger)
        const callbacks = await additionalCallbacks(nodeData, options)
        let res: ChainValues = {}
        let sourceDocuments: ICommonObject[] = []
        let usedTools: IUsedTool[] = []
        // When a socket.IO client is attached, stream tokens and emit
        // source documents / used tools to the client as they appear
        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
            if (res.sourceDocuments) {
                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
                usedTools = res.usedTools
            }
        } else {
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
            if (res.sourceDocuments) {
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                usedTools = res.usedTools
            }
        }
        // Persist the exchange so subsequent turns see it in chat history
        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: res?.output,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )
        // Return a structured object only when there is extra metadata to carry
        let finalRes = res?.output
        if (sourceDocuments.length || usedTools.length) {
            finalRes = { text: res?.output }
            if (sourceDocuments.length) {
                finalRes.sourceDocuments = flatten(sourceDocuments)
            }
            if (usedTools.length) {
                finalRes.usedTools = usedTools
            }
            return finalRes
        }
        return finalRes
    }
}
/**
 * Assembles the runnable agent pipeline (prompt -> function-bound model ->
 * OpenAI functions output parser) and wraps it in a Flowise AgentExecutor.
 */
const prepareAgent = (nodeData: INodeData, options: ICommonObject, flowObj: { sessionId?: string; chatId?: string; input?: string }) => {
    const llm = nodeData.inputs?.model as ChatOpenAI
    const maxIterations = nodeData.inputs?.maxIterations as string
    const chatMemory = nodeData.inputs?.memory as FlowiseMemory
    const systemMessage = nodeData.inputs?.systemMessage as string
    const agentTools = flatten(nodeData.inputs?.tools)

    // Respect custom keys configured on the memory node; fall back to LangChain defaults
    const historyKey = chatMemory.memoryKey || 'chat_history'
    const questionKey = chatMemory.inputKey || 'input'
    const prependMessages = options?.prependMessages

    const prompt = ChatPromptTemplate.fromMessages([
        ['system', systemMessage || `You are a helpful AI assistant.`],
        new MessagesPlaceholder(historyKey),
        ['human', `{${questionKey}}`],
        new MessagesPlaceholder('agent_scratchpad')
    ])

    // Advertise every tool to the model as an OpenAI function definition
    const boundModel = llm.bind({
        functions: agentTools.map((tool: any) => formatToOpenAIFunction(tool))
    })

    const runnableAgent = RunnableSequence.from([
        {
            [questionKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
            agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
            [historyKey]: async (_: { input: string; steps: AgentStep[] }) => {
                const pastMessages = (await chatMemory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
                return pastMessages ?? []
            }
        },
        prompt,
        boundModel,
        new OpenAIFunctionsAgentOutputParser()
    ])

    return AgentExecutor.fromAgentAndTools({
        agent: runnableAgent,
        tools: agentTools,
        sessionId: flowObj?.sessionId,
        chatId: flowObj?.chatId,
        input: flowObj?.input,
        verbose: process.env.DEBUG === 'true',
        maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
    })
}
module.exports = { nodeClass: OpenAIFunctionAgent_Agents }

View File

@ -1,9 +0,0 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M16 12.6108L22 15.9608" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M7.17701 19.5848C6.49568 20.4069 6.12505 21.4424 6.12993 22.5101C6.13481 23.5779 6.51489 24.6099 7.2037 25.4258C7.89252 26.2416 8.84622 26.7893 9.89802 26.9732C10.9498 27.157 12.0328 26.9653 12.9575 26.4314L15.4787 24.9657M18.6002 14.106V19.5848" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M8.19877 9.98459C6.39026 9.67775 4.57524 10.4982 3.60403 12.1806C3.00524 13.2178 2.84295 14.4504 3.15284 15.6073C3.46273 16.7642 4.21943 17.7507 5.25652 18.3498L10.3049 21.3269C10.6109 21.5074 10.9898 21.5119 11.3001 21.3388L18.6 17.2655" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M17.0172 6.06585C16.6456 5.06522 15.9342 4.227 15.0072 3.6977C14.0803 3.1684 12.9969 2.98168 11.9462 3.17018C10.8956 3.35869 9.94464 3.91042 9.25954 4.72895C8.57444 5.54747 8.19879 6.58074 8.19824 7.64814V13.6575C8.19824 14.0154 8.38951 14.346 8.69977 14.5244L15.9992 18.7215" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M24.8216 11.7476C25.5029 10.9255 25.8735 9.89004 25.8687 8.8223C25.8638 7.75457 25.4837 6.72253 24.7949 5.90667C24.1061 5.09082 23.1524 4.54308 22.1006 4.35924C21.0488 4.17541 19.9658 4.36718 19.0411 4.90101L13.8942 7.90613C13.5872 8.08539 13.3984 8.41418 13.3984 8.76971V17.2265" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M28.3944 19.0635C28.9932 18.0263 29.1555 16.7937 28.8456 15.6368C28.5357 14.4799 27.779 13.4934 26.7419 12.8943L21.6409 9.91752C21.3316 9.73703 20.9494 9.7357 20.6388 9.91405L13.3984 14.0723" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M18 28.9997H18.8071C19.6909 28.9997 20.4526 28.3921 20.6297 27.546L21.991 21.4537C22.1681 20.6076 22.9299 20 23.8136 20H24.6207M20.0929 22.7023H23.8136M24 25.0214H24.5014C24.8438 25.0214 25.1586 25.2052 25.3207 25.5L27.3429 28.5213C27.5051 28.8161 27.8198 29 28.1622 29H28.6997M24.049 29C24.6261 29 25.1609 28.7041 25.4578 28.2205L27.2424 25.8009C27.5393 25.3173 28.0741 25.0214 28.6512 25.0214" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Before

Width:  |  Height:  |  Size: 2.3 KiB

View File

@ -1,211 +0,0 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI } from '@langchain/openai'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { convertToOpenAITool } from '@langchain/core/utils/function_calling'
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
import { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor } from '../../../src/agents'
import { Moderation, checkInputs } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
/**
 * Flowise node: OpenAI Tool Agent.
 * Uses OpenAI tool calling (the successor to function calling) to let the
 * model choose which tools to invoke and with which arguments.
 * The `DEPRECATING` badge flags this node for eventual removal.
 */
class OpenAIToolAgent_Agents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    // Conversation session identifier; scopes reads/writes of chat memory
    sessionId?: string
    // UI badge describing the node's lifecycle state
    badge?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'OpenAI Tool Agent'
        this.name = 'openAIToolAgent'
        this.version = 1.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'function.svg'
        this.description = `Agent that uses OpenAI Function Calling to pick the tools and args to call`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        this.badge = 'DEPRECATING'
        // Node input anchors/params rendered by the Flowise UI
        this.inputs = [
            {
                label: 'Tools',
                name: 'tools',
                type: 'Tool',
                list: true
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'OpenAI/Azure Chat Model',
                name: 'model',
                type: 'BaseChatModel'
            },
            {
                label: 'System Message',
                name: 'systemMessage',
                type: 'string',
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            },
            {
                label: 'Max Iterations',
                name: 'maxIterations',
                type: 'number',
                optional: true,
                additionalParams: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    /** Builds and returns the AgentExecutor without executing it. */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
    }

    /**
     * Runs the agent for one user input: applies input moderation (if any),
     * invokes the executor with streaming (socket.IO) or plain callbacks,
     * persists the exchange to chat memory, and returns either the plain text
     * output or an object carrying text plus sourceDocuments/usedTools.
     */
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the OpenAI Function Agent
                input = await checkInputs(moderations, input)
            } catch (e) {
                // Brief pause before returning the moderation error to the caller
                // (presumably so a streaming client can settle — confirm)
                await new Promise((resolve) => setTimeout(resolve, 500))
                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }
        const executor = prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
        const loggerHandler = new ConsoleCallbackHandler(options.logger)
        const callbacks = await additionalCallbacks(nodeData, options)
        let res: ChainValues = {}
        let sourceDocuments: ICommonObject[] = []
        let usedTools: IUsedTool[] = []
        // When a socket.IO client is attached, stream tokens and emit
        // source documents / used tools to the client as they appear
        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
            if (res.sourceDocuments) {
                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
                usedTools = res.usedTools
            }
        } else {
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
            if (res.sourceDocuments) {
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                usedTools = res.usedTools
            }
        }
        // Persist the exchange so subsequent turns see it in chat history
        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: res?.output,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )
        // Return a structured object only when there is extra metadata to carry
        let finalRes = res?.output
        if (sourceDocuments.length || usedTools.length) {
            finalRes = { text: res?.output }
            if (sourceDocuments.length) {
                finalRes.sourceDocuments = flatten(sourceDocuments)
            }
            if (usedTools.length) {
                finalRes.usedTools = usedTools
            }
            return finalRes
        }
        return finalRes
    }
}
/**
 * Assembles the runnable agent pipeline (prompt -> tool-bound model ->
 * OpenAI tools output parser) and wraps it in a Flowise AgentExecutor.
 */
const prepareAgent = (nodeData: INodeData, options: ICommonObject, flowObj: { sessionId?: string; chatId?: string; input?: string }) => {
    const llm = nodeData.inputs?.model as ChatOpenAI
    const maxIterations = nodeData.inputs?.maxIterations as string
    const chatMemory = nodeData.inputs?.memory as FlowiseMemory
    const systemMessage = nodeData.inputs?.systemMessage as string
    const agentTools = flatten(nodeData.inputs?.tools)

    // Respect custom keys configured on the memory node; fall back to LangChain defaults
    const historyKey = chatMemory.memoryKey || 'chat_history'
    const questionKey = chatMemory.inputKey || 'input'
    const prependMessages = options?.prependMessages

    const prompt = ChatPromptTemplate.fromMessages([
        ['system', systemMessage || `You are a helpful AI assistant.`],
        new MessagesPlaceholder(historyKey),
        ['human', `{${questionKey}}`],
        new MessagesPlaceholder('agent_scratchpad')
    ])

    // Advertise every tool to the model in the OpenAI tool schema
    const boundModel = llm.bind({ tools: agentTools.map(convertToOpenAITool) })

    const runnableAgent = RunnableSequence.from([
        {
            [questionKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
            agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(i.steps),
            [historyKey]: async (_: { input: string; steps: ToolsAgentStep[] }) => {
                const pastMessages = (await chatMemory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
                return pastMessages ?? []
            }
        },
        prompt,
        boundModel,
        new OpenAIToolsAgentOutputParser()
    ])

    return AgentExecutor.fromAgentAndTools({
        agent: runnableAgent,
        tools: agentTools,
        sessionId: flowObj?.sessionId,
        chatId: flowObj?.chatId,
        input: flowObj?.input,
        verbose: process.env.DEBUG === 'true',
        maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
    })
}
module.exports = { nodeClass: OpenAIToolAgent_Agents }

View File

@ -1,9 +0,0 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M16 12.6108L22 15.9608" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M7.17701 19.5848C6.49568 20.4069 6.12505 21.4424 6.12993 22.5101C6.13481 23.5779 6.51489 24.6099 7.2037 25.4258C7.89252 26.2416 8.84622 26.7893 9.89802 26.9732C10.9498 27.157 12.0328 26.9653 12.9575 26.4314L15.4787 24.9657M18.6002 14.106V19.5848" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M8.19877 9.98459C6.39026 9.67775 4.57524 10.4982 3.60403 12.1806C3.00524 13.2178 2.84295 14.4504 3.15284 15.6073C3.46273 16.7642 4.21943 17.7507 5.25652 18.3498L10.3049 21.3269C10.6109 21.5074 10.9898 21.5119 11.3001 21.3388L18.6 17.2655" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M17.0172 6.06585C16.6456 5.06522 15.9342 4.227 15.0072 3.6977C14.0803 3.1684 12.9969 2.98168 11.9462 3.17018C10.8956 3.35869 9.94464 3.91042 9.25954 4.72895C8.57444 5.54747 8.19879 6.58074 8.19824 7.64814V13.6575C8.19824 14.0154 8.38951 14.346 8.69977 14.5244L15.9992 18.7215" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M24.8216 11.7476C25.5029 10.9255 25.8735 9.89004 25.8687 8.8223C25.8638 7.75457 25.4837 6.72253 24.7949 5.90667C24.1061 5.09082 23.1524 4.54308 22.1006 4.35924C21.0488 4.17541 19.9658 4.36718 19.0411 4.90101L13.8942 7.90613C13.5872 8.08539 13.3984 8.41418 13.3984 8.76971V17.2265" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M28.3944 19.0635C28.9932 18.0263 29.1555 16.7937 28.8456 15.6368C28.5357 14.4799 27.779 13.4934 26.7419 12.8943L21.6409 9.91752C21.3316 9.73703 20.9494 9.7357 20.6388 9.91405L13.3984 14.0723" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M18 28.9997H18.8071C19.6909 28.9997 20.4526 28.3921 20.6297 27.546L21.991 21.4537C22.1681 20.6076 22.9299 20 23.8136 20H24.6207M20.0929 22.7023H23.8136M24 25.0214H24.5014C24.8438 25.0214 25.1586 25.2052 25.3207 25.5L27.3429 28.5213C27.5051 28.8161 27.8198 29 28.1622 29H28.6997M24.049 29C24.6261 29 25.1609 28.7041 25.4578 28.2205L27.2424 25.8009C27.5393 25.3173 28.0741 25.0214 28.6512 25.0214" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Before

Width:  |  Height:  |  Size: 2.3 KiB

View File

@ -36,7 +36,6 @@ class ToolAgent_Agents implements INode {
this.icon = 'toolAgent.png'
this.description = `Agent that uses Function Calling to pick the tools and args to call`
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
this.badge = 'NEW'
this.inputs = [
{
label: 'Tools',

View File

@ -45,7 +45,6 @@ class ChatOllamaFunction_ChatModels implements INode {
this.category = 'Chat Models'
this.description = 'Run open-source function-calling compatible LLM on Ollama'
this.baseClasses = [this.type, ...getBaseClasses(OllamaFunctions)]
this.badge = 'NEW'
this.inputs = [
{
label: 'Cache',

View File

@ -23,7 +23,6 @@ class CustomDocumentLoader_DocumentLoaders implements INode {
this.type = 'Document'
this.icon = 'customDocLoader.svg'
this.category = 'Document Loaders'
this.badge = 'NEW'
this.description = `Custom function for loading documents`
this.baseClasses = [this.type]
this.inputs = [

View File

@ -22,7 +22,6 @@ class DocStore_DocumentLoaders implements INode {
this.version = 1.0
this.type = 'Document'
this.icon = 'dstore.svg'
this.badge = 'NEW'
this.category = 'Document Loaders'
this.description = `Load data from pre-configured document stores`
this.baseClasses = [this.type]

View File

@ -25,7 +25,6 @@ class MySQLRecordManager_RecordManager implements INode {
this.category = 'Record Manager'
this.description = 'Use MySQL to keep track of document writes into the vector databases'
this.baseClasses = [this.type, 'RecordManager', ...getBaseClasses(MySQLRecordManager)]
this.badge = 'NEW'
this.inputs = [
{
label: 'Host',

View File

@ -25,7 +25,6 @@ class PostgresRecordManager_RecordManager implements INode {
this.category = 'Record Manager'
this.description = 'Use Postgres to keep track of document writes into the vector databases'
this.baseClasses = [this.type, 'RecordManager', ...getBaseClasses(PostgresRecordManager)]
this.badge = 'NEW'
this.inputs = [
{
label: 'Host',

View File

@ -25,7 +25,6 @@ class SQLiteRecordManager_RecordManager implements INode {
this.category = 'Record Manager'
this.description = 'Use SQLite to keep track of document writes into the vector databases'
this.baseClasses = [this.type, 'RecordManager', ...getBaseClasses(SQLiteRecordManager)]
this.badge = 'NEW'
this.inputs = [
{
label: 'Database File Path',

View File

@ -26,7 +26,6 @@ class CohereRerankRetriever_Retrievers implements INode {
this.type = 'Cohere Rerank Retriever'
this.icon = 'Cohere.svg'
this.category = 'Retrievers'
this.badge = 'NEW'
this.description = 'Cohere Rerank indexes the documents from most to least semantically relevant to the query.'
this.baseClasses = [this.type, 'BaseRetriever']
this.credential = {

View File

@ -25,7 +25,6 @@ class EmbeddingsFilterRetriever_Retrievers implements INode {
this.type = 'EmbeddingsFilterRetriever'
this.icon = 'compressionRetriever.svg'
this.category = 'Retrievers'
this.badge = 'NEW'
this.description = 'A document compressor that uses embeddings to drop documents unrelated to the query'
this.baseClasses = [this.type, 'BaseRetriever']
this.inputs = [

View File

@ -25,7 +25,6 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
this.type = 'LLMFilterRetriever'
this.icon = 'llmFilterRetriever.svg'
this.category = 'Retrievers'
this.badge = 'NEW'
this.description =
'Iterate over the initially returned documents and extract, from each, only the content that is relevant to the query'
this.baseClasses = [this.type, 'BaseRetriever']

View File

@ -24,7 +24,6 @@ class RRFRetriever_Retrievers implements INode {
this.name = 'RRFRetriever'
this.version = 1.0
this.type = 'RRFRetriever'
this.badge = 'NEW'
this.icon = 'rrfRetriever.svg'
this.category = 'Retrievers'
this.description = 'Reciprocal Rank Fusion to re-rank search results by multiple query generation.'

View File

@ -26,7 +26,6 @@ class VoyageAIRerankRetriever_Retrievers implements INode {
this.type = 'VoyageAIRerankRetriever'
this.icon = 'voyageai.png'
this.category = 'Retrievers'
this.badge = 'NEW'
this.description = 'Voyage AI Rerank indexes the documents from most to least semantically relevant to the query.'
this.baseClasses = [this.type, 'BaseRetriever']
this.credential = {

View File

@ -36,7 +36,6 @@ class E2B_Tools implements INode {
this.type = 'E2B'
this.icon = 'e2b.png'
this.category = 'Tools'
this.badge = 'NEW'
this.description = 'Execute code in E2B Code Intepreter'
this.baseClasses = [this.type, 'Tool', ...getBaseClasses(E2BTool)]
this.credential = {

View File

@ -36,7 +36,6 @@ class PythonInterpreter_Tools implements INode {
this.type = 'PythonInterpreter'
this.icon = 'python.svg'
this.category = 'Tools'
this.badge = 'NEW'
this.description = 'Execute python code in Pyodide sandbox environment'
this.baseClasses = [this.type, 'Tool', ...getBaseClasses(PythonInterpreterTool)]
this.inputs = [

View File

@ -11,6 +11,7 @@ class CustomFunction_Utilities implements INode {
type: string
icon: string
category: string
tags: string[]
baseClasses: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
@ -18,12 +19,13 @@ class CustomFunction_Utilities implements INode {
constructor() {
this.label = 'Custom JS Function'
this.name = 'customFunction'
this.version = 1.0
this.version = 2.0
this.type = 'CustomFunction'
this.icon = 'customfunction.svg'
this.category = 'Utilities'
this.description = `Execute custom javascript function`
this.baseClasses = [this.type, 'Utilities']
this.tags = ['Utilities']
this.inputs = [
{
label: 'Input Variables',

View File

@ -8,6 +8,7 @@ class GetVariable_Utilities implements INode {
type: string
icon: string
category: string
tags: string[]
baseClasses: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
@ -15,12 +16,13 @@ class GetVariable_Utilities implements INode {
constructor() {
this.label = 'Get Variable'
this.name = 'getVariable'
this.version = 1.0
this.version = 2.0
this.type = 'GetVariable'
this.icon = 'getvar.svg'
this.category = 'Utilities'
this.description = `Get variable that was saved using Set Variable node`
this.baseClasses = [this.type, 'Utilities']
this.tags = ['Utilities']
this.inputs = [
{
label: 'Variable Name',

View File

@ -11,6 +11,7 @@ class IfElseFunction_Utilities implements INode {
type: string
icon: string
category: string
tags: string[]
baseClasses: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
@ -18,12 +19,13 @@ class IfElseFunction_Utilities implements INode {
constructor() {
this.label = 'IfElse Function'
this.name = 'ifElseFunction'
this.version = 1.0
this.version = 2.0
this.type = 'IfElseFunction'
this.icon = 'ifelsefunction.svg'
this.category = 'Utilities'
this.description = `Split flows based on If Else javascript functions`
this.baseClasses = [this.type, 'Utilities']
this.tags = ['Utilities']
this.inputs = [
{
label: 'Input Variables',

View File

@ -8,6 +8,7 @@ class SetVariable_Utilities implements INode {
type: string
icon: string
category: string
tags: string[]
baseClasses: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
@ -15,11 +16,12 @@ class SetVariable_Utilities implements INode {
constructor() {
this.label = 'Set Variable'
this.name = 'setVariable'
this.version = 1.0
this.version = 2.0
this.type = 'SetVariable'
this.icon = 'setvar.svg'
this.category = 'Utilities'
this.description = `Set variable which can be retrieved at a later stage. Variable is only available during runtime.`
this.tags = ['Utilities']
this.baseClasses = [this.type, 'Utilities']
this.inputs = [
{

View File

@ -8,16 +8,18 @@ class StickyNote implements INode {
type: string
icon: string
category: string
tags: string[]
baseClasses: string[]
inputs: INodeParams[]
constructor() {
this.label = 'Sticky Note'
this.name = 'stickyNote'
this.version = 1.0
this.version = 2.0
this.type = 'StickyNote'
this.icon = 'stickyNote.svg'
this.category = 'Utilities'
this.tags = ['Utilities']
this.description = 'Add a sticky note'
this.inputs = [
{

View File

@ -30,7 +30,6 @@ class Chroma_VectorStores implements INode {
this.category = 'Vector Stores'
this.description = 'Upsert embedded data and perform similarity search upon query using Chroma, an open-source embedding database'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,129 +0,0 @@
import { Chroma } from '@langchain/community/vectorstores/chroma'
import { Embeddings } from '@langchain/core/embeddings'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ChromaExtended } from './core'
/**
 * Flowise node: connect to an existing Chroma collection (documents are
 * assumed to have been upserted previously) and expose it either as a
 * retriever or as the raw vector store, depending on the selected output.
 */
class Chroma_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Chroma Load Existing Index'
        this.name = 'chromaExistingIndex'
        this.version = 1.0
        this.type = 'Chroma'
        this.icon = 'chroma.svg'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Chroma (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        // Node is being phased out — presumably superseded by a combined Chroma node; TODO confirm replacement
        this.badge = 'DEPRECATING'
        // Credential only applies to hosted Chroma protected by an X-Api-key header
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            description: 'Only needed if you have chroma on cloud services with X-Api-key',
            optional: true,
            credentialNames: ['chromaApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Collection Name',
                name: 'collectionName',
                type: 'string'
            },
            {
                // Optional — when omitted the Chroma client's default endpoint is used
                label: 'Chroma URL',
                name: 'chromaURL',
                type: 'string',
                optional: true
            },
            {
                // JSON metadata filter applied to similarity searches
                label: 'Chroma Metadata Filter',
                name: 'chromaMetadataFilter',
                type: 'json',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Chroma Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Chroma Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(Chroma)]
            }
        ]
    }

    /**
     * Connects to the existing collection and returns either a retriever
     * (output === 'retriever') or the vector store itself.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const collectionName = nodeData.inputs?.collectionName as string
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const chromaURL = nodeData.inputs?.chromaURL as string
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        // Top K arrives as a string from the UI; default to 4 when unset
        const k = topK ? parseFloat(topK) : 4

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const chromaApiKey = getCredentialParam('chromaApiKey', credentialData, nodeData)

        const chromaMetadataFilter = nodeData.inputs?.chromaMetadataFilter

        // Only include optional connection fields that were actually provided
        const obj: {
            collectionName: string
            url?: string
            chromaApiKey?: string
            filter?: object | undefined
        } = { collectionName }
        if (chromaURL) obj.url = chromaURL
        if (chromaApiKey) obj.chromaApiKey = chromaApiKey
        if (chromaMetadataFilter) {
            // The filter may arrive as an object or as a JSON string from the UI
            const metadatafilter = typeof chromaMetadataFilter === 'object' ? chromaMetadataFilter : JSON.parse(chromaMetadataFilter)
            obj.filter = metadatafilter
        }

        const vectorStore = await ChromaExtended.fromExistingCollection(embeddings, obj)

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Stash k (and the filter) on the store so downstream consumers pick them up
            ;(vectorStore as any).k = k
            if (chromaMetadataFilter) {
                ;(vectorStore as any).filter = obj.filter
            }
            return vectorStore
        }
        // Fallback: no recognized output selected — return the raw store
        return vectorStore
    }
}
module.exports = { nodeClass: Chroma_Existing_VectorStores }

View File

@ -1,129 +0,0 @@
import { flatten } from 'lodash'
import { Chroma } from '@langchain/community/vectorstores/chroma'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ChromaExtended } from './core'
/**
 * Flowise node: embed and upsert documents into a Chroma collection, then
 * expose the store either as a retriever or as the raw vector store.
 */
class ChromaUpsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Chroma Upsert Document'
        this.name = 'chromaUpsert'
        this.version = 1.0
        this.type = 'Chroma'
        this.icon = 'chroma.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to Chroma'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        // Node is being phased out — presumably superseded by a combined Chroma node; TODO confirm replacement
        this.badge = 'DEPRECATING'
        // Credential only applies to hosted Chroma protected by an X-Api-key header
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            description: 'Only needed if you have chroma on cloud services with X-Api-key',
            optional: true,
            credentialNames: ['chromaApi']
        }
        this.inputs = [
            {
                // Accepts a list; upstream nodes may produce nested arrays (flattened in init)
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Collection Name',
                name: 'collectionName',
                type: 'string'
            },
            {
                // Optional — when omitted the Chroma client's default endpoint is used
                label: 'Chroma URL',
                name: 'chromaURL',
                type: 'string',
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Chroma Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Chroma Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(Chroma)]
            }
        ]
    }

    /**
     * Embeds and upserts the input documents, then returns either a retriever
     * (output === 'retriever') or the vector store itself.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const collectionName = nodeData.inputs?.collectionName as string
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const chromaURL = nodeData.inputs?.chromaURL as string
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        // Top K arrives as a string from the UI; default to 4 when unset
        const k = topK ? parseFloat(topK) : 4

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const chromaApiKey = getCredentialParam('chromaApiKey', credentialData, nodeData)

        // Flatten possibly-nested document arrays, drop empty entries,
        // and re-wrap as Document to normalize the shape
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }

        // Only include optional connection fields that were actually provided
        const obj: {
            collectionName: string
            url?: string
            chromaApiKey?: string
        } = { collectionName }
        if (chromaURL) obj.url = chromaURL
        if (chromaApiKey) obj.chromaApiKey = chromaApiKey

        const vectorStore = await ChromaExtended.fromDocuments(finalDocs, embeddings, obj)

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Stash k on the store so downstream consumers pick it up
            ;(vectorStore as any).k = k
            return vectorStore
        }
        // Fallback: no recognized output selected — return the raw store
        return vectorStore
    }
}
module.exports = { nodeClass: ChromaUpsert_VectorStores }

View File

@ -1,208 +0,0 @@
import {
getBaseClasses,
getCredentialData,
getCredentialParam,
ICommonObject,
INodeData,
INodeOutputsValue,
INodeParams
} from '../../../src'
import { Client, ClientOptions } from '@elastic/elasticsearch'
import { ElasticClientArgs, ElasticVectorSearch } from '@langchain/community/vectorstores/elasticsearch'
import { Embeddings } from '@langchain/core/embeddings'
import { VectorStore } from '@langchain/core/vectorstores'
import { Document } from '@langchain/core/documents'
/**
 * Shared base class for the Elasticsearch vector store nodes (load-existing
 * and upsert variants). Declares the common credential, inputs and outputs,
 * and centralizes Elasticsearch client/connection argument construction.
 * Subclasses supply the concrete store via constructVectorStore().
 */
export abstract class ElasticSearchBase {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    protected constructor() {
        this.type = 'Elasticsearch'
        this.icon = 'elasticsearch.png'
        this.category = 'Vector Stores'
        // Node family is being phased out — presumably superseded by a combined node; TODO confirm replacement
        this.badge = 'DEPRECATING'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        // Supports either API-key auth (endpoint) or username/password (cloud) credentials
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['elasticsearchApi', 'elasticSearchUserPassword']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Index Name',
                name: 'indexName',
                placeholder: '<INDEX_NAME>',
                type: 'string'
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Similarity',
                name: 'similarity',
                description: 'Similarity measure used in Elasticsearch.',
                type: 'options',
                default: 'l2_norm',
                options: [
                    {
                        label: 'l2_norm',
                        name: 'l2_norm'
                    },
                    {
                        label: 'dot_product',
                        name: 'dot_product'
                    },
                    {
                        label: 'cosine',
                        name: 'cosine'
                    }
                ],
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Elasticsearch Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Elasticsearch Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(ElasticVectorSearch)]
            }
        ]
    }

    /**
     * Builds (or attaches to) the concrete vector store.
     * @param docs - documents to upsert; undefined for the load-existing variant
     */
    abstract constructVectorStore(
        embeddings: Embeddings,
        elasticSearchClientArgs: ElasticClientArgs,
        docs: Document<Record<string, any>>[] | undefined
    ): Promise<VectorStore>

    /**
     * Resolves credentials and inputs, builds the client args, delegates store
     * construction to the subclass, and returns either a retriever
     * (output === 'retriever') or the vector store itself.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject, docs: Document<Record<string, any>>[] | undefined): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const endPoint = getCredentialParam('endpoint', credentialData, nodeData)
        const cloudId = getCredentialParam('cloudId', credentialData, nodeData)
        const indexName = nodeData.inputs?.indexName as string
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const topK = nodeData.inputs?.topK as string
        // BUGFIX: the input is registered under name 'similarity' (see this.inputs),
        // but this previously read 'similarityMeasure', so the user's selection was
        // silently ignored and the switch default (l2_norm) always applied.
        const similarityMeasure = nodeData.inputs?.similarity as string
        // Top K arrives as a string from the UI; default to 4 when unset
        const k = topK ? parseFloat(topK) : 4
        const output = nodeData.outputs?.output as string

        const elasticSearchClientArgs = this.prepareClientArgs(endPoint, cloudId, credentialData, nodeData, similarityMeasure, indexName)
        const vectorStore = await this.constructVectorStore(embeddings, elasticSearchClientArgs, docs)

        if (output === 'retriever') {
            return vectorStore.asRetriever(k)
        } else if (output === 'vectorStore') {
            // Stash k on the store so downstream consumers pick it up
            ;(vectorStore as any).k = k
            return vectorStore
        }
        // Fallback: no recognized output selected — return the raw store
        return vectorStore
    }

    /**
     * Maps credential data to Elasticsearch ClientOptions.
     * - endPoint + apiKey  -> node URL with API-key auth
     * - cloudId            -> username/password auth; a cloudId that starts with
     *   'http' is treated as a raw node URL with TLS verification disabled
     *   (NOTE(review): rejectUnauthorized=false skips certificate checks — confirm intended)
     * Returns empty options when neither endPoint nor cloudId is present.
     */
    protected prepareConnectionOptions(
        endPoint: string | undefined,
        cloudId: string | undefined,
        credentialData: ICommonObject,
        nodeData: INodeData
    ) {
        let elasticSearchClientOptions: ClientOptions = {}
        if (endPoint) {
            let apiKey = getCredentialParam('apiKey', credentialData, nodeData)
            elasticSearchClientOptions = {
                node: endPoint,
                auth: {
                    apiKey: apiKey
                }
            }
        } else if (cloudId) {
            let username = getCredentialParam('username', credentialData, nodeData)
            let password = getCredentialParam('password', credentialData, nodeData)
            if (cloudId.startsWith('http')) {
                elasticSearchClientOptions = {
                    node: cloudId,
                    auth: {
                        username: username,
                        password: password
                    },
                    tls: {
                        rejectUnauthorized: false
                    }
                }
            } else {
                elasticSearchClientOptions = {
                    cloud: {
                        id: cloudId
                    },
                    auth: {
                        username: username,
                        password: password
                    }
                }
            }
        }
        return elasticSearchClientOptions
    }

    /**
     * Builds the ElasticClientArgs: instantiates the Client from connection
     * options and maps the similarity choice (defaulting to l2_norm).
     */
    protected prepareClientArgs(
        endPoint: string | undefined,
        cloudId: string | undefined,
        credentialData: ICommonObject,
        nodeData: INodeData,
        similarityMeasure: string,
        indexName: string
    ) {
        let elasticSearchClientOptions = this.prepareConnectionOptions(endPoint, cloudId, credentialData, nodeData)
        let vectorSearchOptions = {}
        switch (similarityMeasure) {
            case 'dot_product':
                vectorSearchOptions = {
                    similarity: 'dot_product'
                }
                break
            case 'cosine':
                vectorSearchOptions = {
                    similarity: 'cosine'
                }
                break
            default:
                // Unrecognized or missing value falls back to l2_norm
                vectorSearchOptions = {
                    similarity: 'l2_norm'
                }
        }
        const elasticSearchClientArgs: ElasticClientArgs = {
            client: new Client(elasticSearchClientOptions),
            indexName: indexName,
            vectorSearchOptions: vectorSearchOptions
        }
        return elasticSearchClientArgs
    }
}

View File

@ -31,7 +31,6 @@ class Elasticsearch_VectorStores implements INode {
this.icon = 'elasticsearch.png'
this.category = 'Vector Stores'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,30 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { ElasticClientArgs, ElasticVectorSearch } from '@langchain/community/vectorstores/elasticsearch'
import { VectorStore } from '@langchain/core/vectorstores'
import { Document } from '@langchain/core/documents'
import { ElasticSearchBase } from './ElasticSearchBase'
import { ICommonObject, INode, INodeData } from '../../../src/Interface'
/**
 * Flowise node: attach to a pre-populated Elasticsearch index (documents were
 * upserted earlier). Credential, inputs and output handling all come from
 * ElasticSearchBase; this subclass only sets its identity and the
 * load-existing store construction.
 */
class ElasicsearchExisting_VectorStores extends ElasticSearchBase implements INode {
    constructor() {
        super()
        // Node identity as shown on the canvas
        this.name = 'ElasticsearchIndex'
        this.label = 'Elasticsearch Load Existing Index'
        this.description = 'Load existing index from Elasticsearch (i.e: Document has been upserted)'
        this.version = 1.0
    }

    // Nothing to write: simply open the existing index
    async constructVectorStore(
        embeddings: Embeddings,
        elasticSearchClientArgs: ElasticClientArgs,
        _: Document<Record<string, any>>[] | undefined
    ): Promise<VectorStore> {
        const store = await ElasticVectorSearch.fromExistingIndex(embeddings, elasticSearchClientArgs)
        return store
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        // Delegate with no documents; the base class selects retriever vs. vector store output
        return await super.init(nodeData, _, options, undefined)
    }
}
module.exports = { nodeClass: ElasicsearchExisting_VectorStores }

View File

@ -1,56 +0,0 @@
import { flatten } from 'lodash'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { VectorStore } from '@langchain/core/vectorstores'
import { ElasticClientArgs, ElasticVectorSearch } from '@langchain/community/vectorstores/elasticsearch'
import { ICommonObject, INode, INodeData } from '../../../src/Interface'
import { ElasticSearchBase } from './ElasticSearchBase'
class ElasicsearchUpsert_VectorStores extends ElasticSearchBase implements INode {
constructor() {
super()
this.label = 'Elasticsearch Upsert Document'
this.name = 'ElasticsearchUpsert'
this.version = 1.0
this.description = 'Upsert documents to Elasticsearch'
this.inputs.unshift({
label: 'Document',
name: 'document',
type: 'Document',
list: true
})
}
async constructVectorStore(
embeddings: Embeddings,
elasticSearchClientArgs: ElasticClientArgs,
docs: Document<Record<string, any>>[]
): Promise<VectorStore> {
const vectorStore = new ElasticVectorSearch(embeddings, elasticSearchClientArgs)
await vectorStore.addDocuments(docs)
return vectorStore
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const docs = nodeData.inputs?.document as Document[]
const flattenDocs = docs && docs.length ? flatten(docs) : []
const finalDocs = []
for (let i = 0; i < flattenDocs.length; i += 1) {
if (flattenDocs[i] && flattenDocs[i].pageContent) {
finalDocs.push(new Document(flattenDocs[i]))
}
}
// The following code is a workaround for a bug (Langchain Issue #1589) in the underlying library.
// Store does not support object in metadata and fail silently
finalDocs.forEach((d) => {
delete d.metadata.pdf
delete d.metadata.loc
})
// end of workaround
return super.init(nodeData, _, options, finalDocs)
}
}
module.exports = { nodeClass: ElasicsearchUpsert_VectorStores }

View File

@ -27,7 +27,6 @@ class Faiss_VectorStores implements INode {
this.category = 'Vector Stores'
this.description = 'Upsert embedded data and perform similarity search upon query using Faiss library from Meta'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.inputs = [
{
label: 'Document',

View File

@ -1,104 +0,0 @@
import { FaissStore } from '@langchain/community/vectorstores/faiss'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
/**
 * Flowise node: load an existing Faiss index from disk (documents were
 * upserted previously) and expose it either as a retriever or as the raw
 * vector store.
 */
class Faiss_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Faiss Load Existing Index'
        this.name = 'faissExistingIndex'
        this.version = 1.0
        this.type = 'Faiss'
        this.icon = 'faiss.svg'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Faiss (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        // Node is being phased out — presumably superseded by a combined Faiss node; TODO confirm replacement
        this.badge = 'DEPRECATING'
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                // Directory containing the saved faiss.index file
                label: 'Base Path to load',
                name: 'basePath',
                description: 'Path to load faiss.index file',
                placeholder: `C:\\Users\\User\\Desktop`,
                type: 'string'
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Faiss Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Faiss Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(FaissStore)]
            }
        ]
    }

    /**
     * Loads the saved index and returns either a retriever
     * (output === 'retriever') or the vector store itself.
     */
    async init(nodeData: INodeData): Promise<any> {
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const basePath = nodeData.inputs?.basePath as string
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        // Top K arrives as a string from the UI; default to 4 when unset
        const k = topK ? parseFloat(topK) : 4

        const vectorStore = await FaissStore.load(basePath, embeddings)

        // Avoid illegal invocation error
        // Replaces similaritySearchVectorWithScore so that a requested k larger
        // than the index size is clamped to the number of stored vectors.
        // NOTE: the callback's `k` and `index` parameters shadow the outer bindings.
        vectorStore.similaritySearchVectorWithScore = async (query: number[], k: number) => {
            const index = vectorStore.index
            if (k > index.ntotal()) {
                const total = index.ntotal()
                console.warn(`k (${k}) is greater than the number of elements in the index (${total}), setting k to ${total}`)
                k = total
            }
            const result = index.search(query, k)
            // Map Faiss label ids back to stored Documents, paired with distances
            return result.labels.map((id, index) => {
                const uuid = vectorStore._mapping[id]
                return [vectorStore.docstore.search(uuid), result.distances[index]] as [Document, number]
            })
        }

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Stash k on the store so downstream consumers pick it up
            ;(vectorStore as any).k = k
            return vectorStore
        }
        // Fallback: no recognized output selected — return the raw store
        return vectorStore
    }
}
module.exports = { nodeClass: Faiss_Existing_VectorStores }

View File

@ -1,121 +0,0 @@
import { flatten } from 'lodash'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { FaissStore } from '@langchain/community/vectorstores/faiss'
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
/**
 * Flowise node: embed and upsert documents into a Faiss index, persist the
 * index to disk, and expose the store either as a retriever or as the raw
 * vector store.
 */
class FaissUpsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Faiss Upsert Document'
        this.name = 'faissUpsert'
        this.version = 1.0
        this.type = 'Faiss'
        this.icon = 'faiss.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to Faiss'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        // Node is being phased out — presumably superseded by a combined Faiss node; TODO confirm replacement
        this.badge = 'DEPRECATING'
        this.inputs = [
            {
                // Accepts a list; upstream nodes may produce nested arrays (flattened in init)
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                // Directory where faiss.index will be written
                label: 'Base Path to store',
                name: 'basePath',
                description: 'Path to store faiss.index file',
                placeholder: `C:\\Users\\User\\Desktop`,
                type: 'string'
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Faiss Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Faiss Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(FaissStore)]
            }
        ]
    }

    /**
     * Embeds and upserts the input documents, saves the index to basePath,
     * then returns either a retriever (output === 'retriever') or the
     * vector store itself.
     */
    async init(nodeData: INodeData): Promise<any> {
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const output = nodeData.outputs?.output as string
        const basePath = nodeData.inputs?.basePath as string
        const topK = nodeData.inputs?.topK as string
        // Top K arrives as a string from the UI; default to 4 when unset
        const k = topK ? parseFloat(topK) : 4

        // Flatten possibly-nested document arrays, drop empty entries,
        // and re-wrap as Document to normalize the shape
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }

        const vectorStore = await FaissStore.fromDocuments(finalDocs, embeddings)
        // Persist the index so the load-existing node can pick it up later
        await vectorStore.save(basePath)

        // Avoid illegal invocation error
        // Replaces similaritySearchVectorWithScore so that a requested k larger
        // than the index size is clamped to the number of stored vectors.
        // NOTE: the callback's `k` and `index` parameters shadow the outer bindings.
        vectorStore.similaritySearchVectorWithScore = async (query: number[], k: number) => {
            const index = vectorStore.index
            if (k > index.ntotal()) {
                const total = index.ntotal()
                console.warn(`k (${k}) is greater than the number of elements in the index (${total}), setting k to ${total}`)
                k = total
            }
            const result = index.search(query, k)
            // Map Faiss label ids back to stored Documents, paired with distances
            return result.labels.map((id, index) => {
                const uuid = vectorStore._mapping[id]
                return [vectorStore.docstore.search(uuid), result.distances[index]] as [Document, number]
            })
        }

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Stash k on the store so downstream consumers pick it up
            ;(vectorStore as any).k = k
            return vectorStore
        }
        // Fallback: no recognized output selected — return the raw store
        return vectorStore
    }
}
module.exports = { nodeClass: FaissUpsert_VectorStores }

View File

@ -33,7 +33,6 @@ class Milvus_VectorStores implements INode {
this.category = 'Vector Stores'
this.description = `Upsert embedded data and perform similarity search upon query using Milvus, world's most advanced open-source vector database`
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,210 +0,0 @@
import { DataType, ErrorCode } from '@zilliz/milvus2-sdk-node'
import { MilvusLibArgs, Milvus } from '@langchain/community/vectorstores/milvus'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Flowise node that loads an existing Milvus collection (documents already
 * upserted) and exposes it as a vector store or retriever.
 *
 * Marked DEPRECATING: superseded by the combined Milvus node.
 */
class Milvus_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Milvus Load Existing collection'
        this.name = 'milvusExistingCollection'
        this.version = 2.0
        this.type = 'Milvus'
        this.icon = 'milvus.svg'
        this.category = 'Vector Stores'
        this.description = 'Load existing collection from Milvus (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            optional: true,
            credentialNames: ['milvusAuth']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Milvus Server URL',
                name: 'milvusServerUrl',
                type: 'string',
                placeholder: 'http://localhost:19530'
            },
            {
                label: 'Milvus Collection Name',
                name: 'milvusCollection',
                type: 'string'
            },
            {
                label: 'Milvus Filter',
                name: 'milvusFilter',
                type: 'string',
                optional: true,
                description:
                    'Filter data with a simple string query. Refer Milvus <a target="_blank" href="https://milvus.io/blog/2022-08-08-How-to-use-string-data-to-empower-your-similarity-search-applications.md#Hybrid-search">docs</a> for more details.',
                placeholder: 'doc=="a"',
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Milvus Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Milvus Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(Milvus)]
            }
        ]
    }

    /**
     * Connects to the existing collection and returns a retriever or the
     * vector store itself, depending on the selected output anchor.
     * The node-level Milvus Filter takes precedence over any per-query filter.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        // server setup
        const address = nodeData.inputs?.milvusServerUrl as string
        const collectionName = nodeData.inputs?.milvusCollection as string
        const milvusFilter = nodeData.inputs?.milvusFilter as string

        // embeddings
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const topK = nodeData.inputs?.topK as string

        // output
        const output = nodeData.outputs?.output as string

        // format data
        const k = topK ? parseInt(topK, 10) : 4

        // credential
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const milvusUser = getCredentialParam('milvusUser', credentialData, nodeData)
        const milvusPassword = getCredentialParam('milvusPassword', credentialData, nodeData)

        // init MilvusLibArgs
        const milVusArgs: MilvusLibArgs = {
            url: address,
            collectionName: collectionName
        }
        if (milvusUser) milVusArgs.username = milvusUser
        if (milvusPassword) milVusArgs.password = milvusPassword

        const vectorStore = await Milvus.fromExistingCollection(embeddings, milVusArgs)

        // Avoid Illegal Invocation
        // Re-implemented as a closure bound to this store instance.
        vectorStore.similaritySearchVectorWithScore = async (query: number[], k: number, filter?: string) => {
            const hasColResp = await vectorStore.client.hasCollection({
                collection_name: vectorStore.collectionName
            })
            if (hasColResp.status.error_code !== ErrorCode.SUCCESS) {
                // JSON.stringify: interpolating the response object directly
                // would render as '[object Object]' and hide the real error
                throw new Error(`Error checking collection: ${JSON.stringify(hasColResp)}`)
            }
            if (hasColResp.value === false) {
                throw new Error(`Collection not found: ${vectorStore.collectionName}, please create collection before search.`)
            }

            // Node-level filter wins over a filter passed at query time
            const filterStr = milvusFilter ?? filter ?? ''

            await vectorStore.grabCollectionFields()

            // Collection must be loaded into memory before searching
            const loadResp = await vectorStore.client.loadCollectionSync({
                collection_name: vectorStore.collectionName
            })
            if (loadResp.error_code !== ErrorCode.SUCCESS) {
                throw new Error(`Error loading collection: ${JSON.stringify(loadResp)}`)
            }

            // Return every scalar field except the embedding vector itself
            const outputFields = vectorStore.fields.filter((field) => field !== vectorStore.vectorField)

            const searchResp = await vectorStore.client.search({
                collection_name: vectorStore.collectionName,
                search_params: {
                    anns_field: vectorStore.vectorField,
                    topk: k.toString(),
                    metric_type: vectorStore.indexCreateParams.metric_type,
                    params: vectorStore.indexSearchParams
                },
                output_fields: outputFields,
                vector_type: DataType.FloatVector,
                vectors: [query],
                filter: filterStr
            })
            if (searchResp.status.error_code !== ErrorCode.SUCCESS) {
                throw new Error(`Error searching data: ${JSON.stringify(searchResp)}`)
            }

            // Rebuild [Document, score] pairs from the raw search hits;
            // string metadata that parses as JSON is deserialized back
            const results: [Document, number][] = []
            searchResp.results.forEach((result) => {
                const fields = {
                    pageContent: '',
                    metadata: {} as Record<string, any>
                }
                Object.keys(result).forEach((key) => {
                    if (key === vectorStore.textField) {
                        fields.pageContent = result[key]
                    } else if (vectorStore.fields.includes(key) || key === vectorStore.primaryField) {
                        if (typeof result[key] === 'string') {
                            const { isJson, obj } = checkJsonString(result[key])
                            fields.metadata[key] = isJson ? obj : result[key]
                        } else {
                            fields.metadata[key] = result[key]
                        }
                    }
                })
                results.push([new Document(fields), result.score])
            })
            return results
        }

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Expose k and the node-level filter for downstream consumers
            ;(vectorStore as any).k = k
            if (milvusFilter) {
                ;(vectorStore as any).filter = milvusFilter
            }
            return vectorStore
        }
        return vectorStore
    }
}
/**
 * Attempts to parse `value` as JSON.
 * @returns `{ isJson: true, obj: <parsed value> }` on success,
 *          `{ isJson: false, obj: null }` when the string is not valid JSON.
 */
function checkJsonString(value: string): { isJson: boolean; obj: any } {
    let parsed: any = null
    let ok = false
    try {
        parsed = JSON.parse(value)
        ok = true
    } catch {
        parsed = null
    }
    return { isJson: ok, obj: parsed }
}
module.exports = { nodeClass: Milvus_Existing_VectorStores }

View File

@ -1,285 +0,0 @@
import { flatten } from 'lodash'
import { DataType, ErrorCode, MetricType, IndexType } from '@zilliz/milvus2-sdk-node'
import { MilvusLibArgs, Milvus } from '@langchain/community/vectorstores/milvus'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
// Shape of one row passed to the Milvus insert API: each declared collection
// field maps to either a scalar value or the embedding vector (number[]).
interface InsertRow {
    [x: string]: string | number[]
}
/**
 * Flowise node that upserts documents into a Milvus collection (via the
 * MilvusUpsert subclass defined below) and exposes the result as a vector
 * store or retriever.
 *
 * Marked DEPRECATING: superseded by the combined Milvus node.
 */
class Milvus_Upsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Milvus Upsert Document'
        this.name = 'milvusUpsert'
        this.version = 1.0
        this.type = 'Milvus'
        this.icon = 'milvus.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to Milvus'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            optional: true,
            credentialNames: ['milvusAuth']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Milvus Server URL',
                name: 'milvusServerUrl',
                type: 'string',
                placeholder: 'http://localhost:19530'
            },
            {
                label: 'Milvus Collection Name',
                name: 'milvusCollection',
                type: 'string'
            },
            {
                // Declared so the topK value read in init() is actually
                // configurable; matches the sibling Milvus/Pinecone nodes.
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Milvus Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Milvus Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(Milvus)]
            }
        ]
    }

    /**
     * Embeds and upserts the connected documents, then returns a retriever
     * or the vector store itself depending on the selected output anchor.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        // server setup
        const address = nodeData.inputs?.milvusServerUrl as string
        const collectionName = nodeData.inputs?.milvusCollection as string

        // embeddings
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const topK = nodeData.inputs?.topK as string

        // output
        const output = nodeData.outputs?.output as string

        // format data
        const k = topK ? parseInt(topK, 10) : 4

        // credential
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const milvusUser = getCredentialParam('milvusUser', credentialData, nodeData)
        const milvusPassword = getCredentialParam('milvusPassword', credentialData, nodeData)

        // init MilvusLibArgs
        const milVusArgs: MilvusLibArgs = {
            url: address,
            collectionName: collectionName
        }
        if (milvusUser) milVusArgs.username = milvusUser
        if (milvusPassword) milVusArgs.password = milvusPassword

        // Documents may arrive as nested arrays; flatten and drop entries
        // without page content before embedding
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }

        const vectorStore = await MilvusUpsert.fromDocuments(finalDocs, embeddings, milVusArgs)

        // Avoid Illegal Invocation
        // Re-implemented as a closure bound to this store instance.
        vectorStore.similaritySearchVectorWithScore = async (query: number[], k: number, filter?: string) => {
            const hasColResp = await vectorStore.client.hasCollection({
                collection_name: vectorStore.collectionName
            })
            if (hasColResp.status.error_code !== ErrorCode.SUCCESS) {
                // JSON.stringify: interpolating the response object directly
                // would render as '[object Object]' and hide the real error
                throw new Error(`Error checking collection: ${JSON.stringify(hasColResp)}`)
            }
            if (hasColResp.value === false) {
                throw new Error(`Collection not found: ${vectorStore.collectionName}, please create collection before search.`)
            }

            const filterStr = filter ?? ''

            await vectorStore.grabCollectionFields()

            // Collection must be loaded into memory before searching
            const loadResp = await vectorStore.client.loadCollectionSync({
                collection_name: vectorStore.collectionName
            })
            if (loadResp.error_code !== ErrorCode.SUCCESS) {
                throw new Error(`Error loading collection: ${JSON.stringify(loadResp)}`)
            }

            // Return every scalar field except the embedding vector itself
            const outputFields = vectorStore.fields.filter((field) => field !== vectorStore.vectorField)

            const searchResp = await vectorStore.client.search({
                collection_name: vectorStore.collectionName,
                search_params: {
                    anns_field: vectorStore.vectorField,
                    topk: k.toString(),
                    metric_type: vectorStore.indexCreateParams.metric_type,
                    params: vectorStore.indexSearchParams
                },
                output_fields: outputFields,
                vector_type: DataType.FloatVector,
                vectors: [query],
                filter: filterStr
            })
            if (searchResp.status.error_code !== ErrorCode.SUCCESS) {
                throw new Error(`Error searching data: ${JSON.stringify(searchResp)}`)
            }

            // Rebuild [Document, score] pairs from the raw search hits;
            // string metadata that parses as JSON is deserialized back
            const results: [Document, number][] = []
            searchResp.results.forEach((result) => {
                const fields = {
                    pageContent: '',
                    metadata: {} as Record<string, any>
                }
                Object.keys(result).forEach((key) => {
                    if (key === vectorStore.textField) {
                        fields.pageContent = result[key]
                    } else if (vectorStore.fields.includes(key) || key === vectorStore.primaryField) {
                        if (typeof result[key] === 'string') {
                            const { isJson, obj } = checkJsonString(result[key])
                            fields.metadata[key] = isJson ? obj : result[key]
                        } else {
                            fields.metadata[key] = result[key]
                        }
                    }
                })
                results.push([new Document(fields), result.score])
            })
            return results
        }

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Expose k on the store so downstream consumers can honour Top K
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
/**
 * Best-effort JSON parse.
 * @returns the parsed value flagged with `isJson: true`, or
 *          `{ isJson: false, obj: null }` for invalid JSON input.
 */
function checkJsonString(value: string): { isJson: boolean; obj: any } {
    try {
        return { isJson: true, obj: JSON.parse(value) }
    } catch (err) {
        return { isJson: false, obj: null }
    }
}
/**
 * Milvus subclass that overrides addVectors to build insert rows from the
 * collection's declared fields, auto-create a vector index when missing,
 * and flush synchronously so inserted data is immediately searchable.
 */
class MilvusUpsert extends Milvus {
    async addVectors(vectors: number[][], documents: Document[]): Promise<void> {
        // Nothing to insert
        if (vectors.length === 0) {
            return
        }
        // Creates the collection if needed and populates this.fields
        await this.ensureCollection(vectors, documents)

        // Build one InsertRow per (vector, document) pair, mapping every
        // declared collection field to its value
        const insertDatas: InsertRow[] = []

        for (let index = 0; index < vectors.length; index++) {
            const vec = vectors[index]
            const doc = documents[index]
            const data: InsertRow = {
                [this.textField]: doc.pageContent,
                [this.vectorField]: vec
            }
            this.fields.forEach((field) => {
                switch (field) {
                    case this.primaryField:
                        // With autoId=false the caller must supply the primary
                        // key through document metadata
                        if (!this.autoId) {
                            if (doc.metadata[this.primaryField] === undefined) {
                                throw new Error(
                                    `The Collection's primaryField is configured with autoId=false, thus its value must be provided through metadata.`
                                )
                            }
                            data[field] = doc.metadata[this.primaryField]
                        }
                        break
                    case this.textField:
                        data[field] = doc.pageContent
                        break
                    case this.vectorField:
                        data[field] = vec
                        break
                    default: // metadata fields
                        if (doc.metadata[field] === undefined) {
                            throw new Error(`The field "${field}" is not provided in documents[${index}].metadata.`)
                        } else if (typeof doc.metadata[field] === 'object') {
                            // Milvus scalar fields cannot hold objects; store as JSON text
                            data[field] = JSON.stringify(doc.metadata[field])
                        } else {
                            data[field] = doc.metadata[field]
                        }
                        break
                }
            })

            insertDatas.push(data)
        }

        // Auto-create an index on the vector field the first time, so the
        // collection is searchable without a manual index step
        const descIndexResp = await this.client.describeIndex({
            collection_name: this.collectionName
        })

        if (descIndexResp.status.error_code === ErrorCode.IndexNotExist) {
            const resp = await this.client.createIndex({
                collection_name: this.collectionName,
                field_name: this.vectorField,
                index_name: `myindex_${Date.now().toString()}`,
                index_type: IndexType.AUTOINDEX,
                metric_type: MetricType.L2
            })
            if (resp.error_code !== ErrorCode.SUCCESS) {
                throw new Error(`Error creating index`)
            }
        }

        const insertResp = await this.client.insert({
            collection_name: this.collectionName,
            fields_data: insertDatas
        })
        if (insertResp.status.error_code !== ErrorCode.SUCCESS) {
            throw new Error(`Error inserting data: ${JSON.stringify(insertResp)}`)
        }

        // Synchronous flush so the rows are persisted and visible to search
        await this.client.flushSync({ collection_names: [this.collectionName] })
    }
}
module.exports = { nodeClass: Milvus_Upsert_VectorStores }

View File

@ -30,7 +30,6 @@ class MongoDBAtlas_VectorStores implements INode {
this.icon = 'mongodb.svg'
this.category = 'Vector Stores'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,146 +0,0 @@
import {
getBaseClasses,
getCredentialData,
getCredentialParam,
ICommonObject,
INodeData,
INodeOutputsValue,
INodeParams
} from '../../../src'
import { Embeddings } from '@langchain/core/embeddings'
import { VectorStore } from '@langchain/core/vectorstores'
import { Document } from '@langchain/core/documents'
import { MongoDBAtlasVectorSearch } from '@langchain/community/vectorstores/mongodb_atlas'
import { Collection, MongoClient } from 'mongodb'
/**
 * Shared base for the MongoDB Atlas vector-store nodes (load-existing and
 * upsert). Declares the common node metadata, credential and inputs, and
 * implements the common init flow; subclasses provide constructVectorStore.
 *
 * Marked DEPRECATING: superseded by the combined MongoDB Atlas node.
 */
export abstract class MongoDBSearchBase {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]
    // Client created in init(); kept on the instance
    // NOTE(review): never closed in this base class — confirm the lifecycle
    // is handled by the caller or process shutdown
    mongoClient: MongoClient

    protected constructor() {
        this.type = 'MongoDB Atlas'
        this.icon = 'mongodb.svg'
        this.category = 'Vector Stores'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['mongoDBUrlApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Database',
                name: 'databaseName',
                placeholder: '<DB_NAME>',
                type: 'string'
            },
            {
                label: 'Collection Name',
                name: 'collectionName',
                placeholder: '<COLLECTION_NAME>',
                type: 'string'
            },
            {
                label: 'Index Name',
                name: 'indexName',
                placeholder: '<VECTOR_INDEX_NAME>',
                type: 'string'
            },
            {
                label: 'Content Field',
                name: 'textKey',
                description: 'Name of the field (column) that contains the actual content',
                type: 'string',
                default: 'text',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Embedded Field',
                name: 'embeddingKey',
                description: 'Name of the field (column) that contains the Embedding',
                type: 'string',
                default: 'embedding',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'MongoDB Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'MongoDB Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(MongoDBAtlasVectorSearch)]
            }
        ]
    }

    // Subclass hook: build the vector store (optionally upserting `docs`)
    abstract constructVectorStore(
        embeddings: Embeddings,
        collection: Collection,
        indexName: string,
        textKey: string,
        embeddingKey: string,
        docs: Document<Record<string, any>>[] | undefined
    ): Promise<VectorStore>

    /**
     * Common init: resolves the connection URL from the credential, opens the
     * collection, delegates store construction to the subclass, and returns
     * a retriever or the vector store per the selected output anchor.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject, docs: Document<Record<string, any>>[] | undefined): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const databaseName = nodeData.inputs?.databaseName as string
        const collectionName = nodeData.inputs?.collectionName as string
        const indexName = nodeData.inputs?.indexName as string
        let textKey = nodeData.inputs?.textKey as string
        let embeddingKey = nodeData.inputs?.embeddingKey as string
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const output = nodeData.outputs?.output as string

        let mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData)

        this.mongoClient = new MongoClient(mongoDBConnectUrl)
        const collection = this.mongoClient.db(databaseName).collection(collectionName)

        // Fall back to the documented defaults when fields are left blank
        if (!textKey || textKey === '') textKey = 'text'
        if (!embeddingKey || embeddingKey === '') embeddingKey = 'embedding'

        const vectorStore = await this.constructVectorStore(embeddings, collection, indexName, textKey, embeddingKey, docs)

        if (output === 'retriever') {
            return vectorStore.asRetriever(k)
        } else if (output === 'vectorStore') {
            // Expose k on the store so downstream consumers can honour Top K
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}

View File

@ -1,39 +0,0 @@
import { Collection } from 'mongodb'
import { MongoDBAtlasVectorSearch } from '@langchain/community/vectorstores/mongodb_atlas'
import { Embeddings } from '@langchain/core/embeddings'
import { VectorStore } from '@langchain/core/vectorstores'
import { Document } from '@langchain/core/documents'
import { MongoDBSearchBase } from './MongoDBSearchBase'
import { ICommonObject, INode, INodeData } from '../../../src/Interface'
/**
 * Node that attaches to an already-populated MongoDB Atlas vector index.
 * All shared wiring lives in MongoDBSearchBase; this subclass only supplies
 * the node metadata and a store factory that performs no upsert.
 */
class MongoDBExisting_VectorStores extends MongoDBSearchBase implements INode {
    constructor() {
        super()
        // Node metadata shown on the canvas
        this.label = 'MongoDB Atlas Load Existing Index'
        this.name = 'MongoDBIndex'
        this.version = 1.0
        this.description = 'Load existing data from MongoDB Atlas (i.e: Document has been upserted)'
    }

    async constructVectorStore(
        embeddings: Embeddings,
        collection: Collection,
        indexName: string,
        textKey: string,
        embeddingKey: string,
        _: Document<Record<string, any>>[] | undefined
    ): Promise<VectorStore> {
        // Loading an existing index: wrap the collection, no documents written
        const config = { collection, indexName, textKey, embeddingKey }
        return new MongoDBAtlasVectorSearch(embeddings, config)
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        // No documents to upsert when loading an existing index
        return super.init(nodeData, _, options, undefined)
    }
}
module.exports = { nodeClass: MongoDBExisting_VectorStores }

View File

@ -1,59 +0,0 @@
import { flatten } from 'lodash'
import { Collection } from 'mongodb'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { VectorStore } from '@langchain/core/vectorstores'
import { MongoDBAtlasVectorSearch } from '@langchain/community/vectorstores/mongodb_atlas'
import { ICommonObject, INode, INodeData } from '../../../src/Interface'
import { MongoDBSearchBase } from './MongoDBSearchBase'
class MongoDBUpsert_VectorStores extends MongoDBSearchBase implements INode {
constructor() {
super()
this.label = 'MongoDB Atlas Upsert Document'
this.name = 'MongoDBUpsert'
this.version = 1.0
this.description = 'Upsert documents to MongoDB Atlas'
this.inputs.unshift({
label: 'Document',
name: 'document',
type: 'Document',
list: true
})
}
async constructVectorStore(
embeddings: Embeddings,
collection: Collection,
indexName: string,
textKey: string,
embeddingKey: string,
docs: Document<Record<string, any>>[]
): Promise<VectorStore> {
const mongoDBAtlasVectorSearch = new MongoDBAtlasVectorSearch(embeddings, {
collection: collection,
indexName: indexName,
textKey: textKey,
embeddingKey: embeddingKey
})
await mongoDBAtlasVectorSearch.addDocuments(docs)
return mongoDBAtlasVectorSearch
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const docs = nodeData.inputs?.document as Document[]
const flattenDocs = docs && docs.length ? flatten(docs) : []
const finalDocs = []
for (let i = 0; i < flattenDocs.length; i += 1) {
if (flattenDocs[i] && flattenDocs[i].pageContent) {
const document = new Document(flattenDocs[i])
finalDocs.push(document)
}
}
return super.init(nodeData, _, options, finalDocs)
}
}
module.exports = { nodeClass: MongoDBUpsert_VectorStores }

View File

@ -29,7 +29,6 @@ class OpenSearch_VectorStores implements INode {
this.category = 'Vector Stores'
this.description = `Upsert embedded data and perform similarity search upon query using OpenSearch, an open-source, all-in-one vector database`
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,116 +0,0 @@
import { OpenSearchVectorStore } from '@langchain/community/vectorstores/opensearch'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { Client } from '@opensearch-project/opensearch'
import { flatten } from 'lodash'
import { getBaseClasses } from '../../../src/utils'
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Flowise node that embeds documents and upserts them into an OpenSearch
 * index, then exposes the result as a vector store or retriever.
 *
 * Marked DEPRECATING: superseded by the combined OpenSearch node.
 */
class OpenSearchUpsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'OpenSearch Upsert Document'
        this.name = 'openSearchUpsertDocument'
        this.version = 1.0
        this.type = 'OpenSearch'
        this.icon = 'opensearch.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to OpenSearch'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'OpenSearch URL',
                name: 'opensearchURL',
                type: 'string',
                placeholder: 'http://127.0.0.1:9200'
            },
            {
                label: 'Index Name',
                name: 'indexName',
                type: 'string'
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'OpenSearch Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'OpenSearch Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(OpenSearchVectorStore)]
            }
        ]
    }

    /**
     * Embeds and indexes the connected documents, then returns a retriever
     * or the vector store itself depending on the selected output anchor.
     */
    async init(nodeData: INodeData): Promise<any> {
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const opensearchURL = nodeData.inputs?.opensearchURL as string
        const indexName = nodeData.inputs?.indexName as string
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        // Default to 4 results when Top K is not supplied
        const k = topK ? parseFloat(topK) : 4

        // Documents may arrive as nested arrays; flatten and drop entries
        // without page content before embedding
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }

        const client = new Client({
            nodes: [opensearchURL]
        })

        // Embeds and writes the documents into the named index
        const vectorStore = await OpenSearchVectorStore.fromDocuments(finalDocs, embeddings, {
            client,
            indexName: indexName
        })

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Expose k on the store so downstream consumers can honour Top K
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
module.exports = { nodeClass: OpenSearchUpsert_VectorStores }

View File

@ -1,99 +0,0 @@
import { OpenSearchVectorStore } from '@langchain/community/vectorstores/opensearch'
import { Embeddings } from '@langchain/core/embeddings'
import { Client } from '@opensearch-project/opensearch'
import { getBaseClasses } from '../../../src/utils'
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
class OpenSearch_Existing_VectorStores implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
badge: string
baseClasses: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'OpenSearch Load Existing Index'
this.name = 'openSearchExistingIndex'
this.version = 1.0
this.type = 'OpenSearch'
this.icon = 'opensearch.svg'
this.category = 'Vector Stores'
this.description = 'Load existing index from OpenSearch (i.e: Document has been upserted)'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'DEPRECATING'
this.inputs = [
{
label: 'Embeddings',
name: 'embeddings',
type: 'Embeddings'
},
{
label: 'OpenSearch URL',
name: 'opensearchURL',
type: 'string',
placeholder: 'http://127.0.0.1:9200'
},
{
label: 'Index Name',
name: 'indexName',
type: 'string'
},
{
label: 'Top K',
name: 'topK',
description: 'Number of top results to fetch. Default to 4',
placeholder: '4',
type: 'number',
additionalParams: true,
optional: true
}
]
this.outputs = [
{
label: 'OpenSearch Retriever',
name: 'retriever',
baseClasses: this.baseClasses
},
{
label: 'OpenSearch Vector Store',
name: 'vectorStore',
baseClasses: [this.type, ...getBaseClasses(OpenSearchVectorStore)]
}
]
}
async init(nodeData: INodeData): Promise<any> {
const embeddings = nodeData.inputs?.embeddings as Embeddings
const opensearchURL = nodeData.inputs?.opensearchURL as string
const indexName = nodeData.inputs?.indexName as string
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const client = new Client({
nodes: [opensearchURL]
})
const vectorStore = new OpenSearchVectorStore(embeddings, {
client,
indexName
})
if (output === 'retriever') {
const retriever = vectorStore.asRetriever(k)
return retriever
} else if (output === 'vectorStore') {
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
}
}
module.exports = { nodeClass: OpenSearch_Existing_VectorStores }

View File

@ -48,7 +48,6 @@ class Pinecone_VectorStores implements INode {
this.category = 'Vector Stores'
this.description = `Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database`
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,128 +0,0 @@
import { Pinecone } from '@pinecone-database/pinecone'
import { PineconeStoreParams, PineconeStore } from '@langchain/pinecone'
import { Embeddings } from '@langchain/core/embeddings'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Flowise node that attaches to an existing Pinecone index (documents
 * already upserted) and exposes it as a vector store or retriever.
 *
 * Marked DEPRECATING: superseded by the combined Pinecone node.
 */
class Pinecone_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Pinecone Load Existing Index'
        this.name = 'pineconeExistingIndex'
        this.version = 1.0
        this.type = 'Pinecone'
        this.icon = 'pinecone.svg'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Pinecone (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['pineconeApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Pinecone Index',
                name: 'pineconeIndex',
                type: 'string'
            },
            {
                label: 'Pinecone Namespace',
                name: 'pineconeNamespace',
                type: 'string',
                placeholder: 'my-first-namespace',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Pinecone Metadata Filter',
                name: 'pineconeMetadataFilter',
                type: 'json',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Pinecone Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Pinecone Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(PineconeStore)]
            }
        ]
    }

    /**
     * Connects to the named Pinecone index (optionally scoped by namespace
     * and metadata filter) and returns a retriever or the vector store per
     * the selected output anchor.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const index = nodeData.inputs?.pineconeIndex as string
        const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string
        const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        // Default to 4 results when Top K is not supplied
        const k = topK ? parseFloat(topK) : 4

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)

        const client = new Pinecone({
            apiKey: pineconeApiKey
        })

        const pineconeIndex = client.Index(index)

        const obj: PineconeStoreParams = {
            pineconeIndex
        }

        if (pineconeNamespace) obj.namespace = pineconeNamespace
        if (pineconeMetadataFilter) {
            // Filter may arrive pre-parsed (object) or as a JSON string
            // NOTE(review): JSON.parse throws on malformed input — confirm
            // upstream validates this field
            const metadatafilter = typeof pineconeMetadataFilter === 'object' ? pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter)
            obj.filter = metadatafilter
        }

        const vectorStore = await PineconeStore.fromExistingIndex(embeddings, obj)

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Expose k on the store so downstream consumers can honour Top K
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
module.exports = { nodeClass: Pinecone_Existing_VectorStores }

View File

@ -1,133 +0,0 @@
import { flatten } from 'lodash'
import { Pinecone } from '@pinecone-database/pinecone'
import { PineconeStoreParams, PineconeStore } from '@langchain/pinecone'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
/**
 * Flowise node (marked DEPRECATING): upserts documents into a Pinecone
 * index and exposes the result as a retriever or as the vector store itself.
 */
class PineconeUpsert_VectorStores implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
badge: string
baseClasses: string[]
inputs: INodeParams[]
credential: INodeParams
outputs: INodeOutputsValue[]
// Declarative node configuration: labels, credential requirement, input
// fields, and the two selectable outputs shown in the Flowise UI.
constructor() {
this.label = 'Pinecone Upsert Document'
this.name = 'pineconeUpsert'
this.version = 1.0
this.type = 'Pinecone'
this.icon = 'pinecone.svg'
this.category = 'Vector Stores'
this.description = 'Upsert documents to Pinecone'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
// Flagged for deprecation in favour of the consolidated Pinecone node
this.badge = 'DEPRECATING'
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['pineconeApi']
}
this.inputs = [
{
label: 'Document',
name: 'document',
type: 'Document',
list: true
},
{
label: 'Embeddings',
name: 'embeddings',
type: 'Embeddings'
},
{
label: 'Pinecone Index',
name: 'pineconeIndex',
type: 'string'
},
{
label: 'Pinecone Namespace',
name: 'pineconeNamespace',
type: 'string',
placeholder: 'my-first-namespace',
additionalParams: true,
optional: true
},
{
label: 'Top K',
name: 'topK',
description: 'Number of top results to fetch. Default to 4',
placeholder: '4',
type: 'number',
additionalParams: true,
optional: true
}
]
this.outputs = [
{
label: 'Pinecone Retriever',
name: 'retriever',
baseClasses: this.baseClasses
},
{
label: 'Pinecone Vector Store',
name: 'vectorStore',
baseClasses: [this.type, ...getBaseClasses(PineconeStore)]
}
]
}
/**
 * Upserts the connected documents into the configured Pinecone index and
 * returns a retriever or the vector store, per the selected output.
 *
 * @param nodeData - node configuration (inputs, outputs, credential id)
 * @param _ - unused flow input string
 * @param options - runtime options used to resolve the stored credential
 */
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const index = nodeData.inputs?.pineconeIndex as string
const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string
const docs = nodeData.inputs?.document as Document[]
const embeddings = nodeData.inputs?.embeddings as Embeddings
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
const client = new Pinecone({
apiKey: pineconeApiKey
})
const pineconeIndex = client.Index(index)
// Upstream nodes may supply nested document arrays; flatten, then keep
// only entries that actually have page content before upserting.
const flattenDocs = docs && docs.length ? flatten(docs) : []
const finalDocs = []
for (let i = 0; i < flattenDocs.length; i += 1) {
if (flattenDocs[i] && flattenDocs[i].pageContent) {
finalDocs.push(new Document(flattenDocs[i]))
}
}
const obj: PineconeStoreParams = {
pineconeIndex
}
if (pineconeNamespace) obj.namespace = pineconeNamespace
const vectorStore = await PineconeStore.fromDocuments(finalDocs, embeddings, obj)
if (output === 'retriever') {
const retriever = vectorStore.asRetriever(k)
return retriever
} else if (output === 'vectorStore') {
// Stash k on the store so downstream consumers can honour top-k
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
}
}
module.exports = { nodeClass: PineconeUpsert_VectorStores }

View File

@ -31,7 +31,6 @@ class Postgres_VectorStores implements INode {
this.category = 'Vector Stores'
this.description = 'Upsert embedded data and perform similarity search upon query using pgvector on Postgres'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,202 +0,0 @@
import { DataSourceOptions } from 'typeorm'
import { Pool } from 'pg'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { TypeORMVectorStore, TypeORMVectorStoreDocument } from '@langchain/community/vectorstores/typeorm'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
/**
 * Flowise node (marked DEPRECATING): loads an existing pgvector table from
 * Postgres and exposes it as a retriever or as the vector store itself.
 */
class Postgres_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    // Declarative node configuration: labels, credential requirement, input
    // fields, and the two selectable outputs shown in the Flowise UI.
    constructor() {
        this.label = 'Postgres Load Existing Index'
        this.name = 'postgresExistingIndex'
        this.version = 2.0
        this.type = 'Postgres'
        this.icon = 'postgres.svg'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Postgres using pgvector (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        // Flagged for deprecation in favour of the consolidated Postgres node
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['PostgresApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Host',
                name: 'host',
                type: 'string'
            },
            {
                label: 'Database',
                name: 'database',
                type: 'string'
            },
            {
                label: 'SSL Connection',
                name: 'sslConnection',
                type: 'boolean',
                default: false,
                optional: false
            },
            {
                label: 'Port',
                name: 'port',
                type: 'number',
                placeholder: '6432',
                optional: true
            },
            {
                label: 'Table Name',
                name: 'tableName',
                type: 'string',
                placeholder: 'documents',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Additional Configuration',
                name: 'additionalConfig',
                type: 'json',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Postgres Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Postgres Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(TypeORMVectorStore)]
            }
        ]
    }

    /**
     * Connects to Postgres via TypeORM, replaces similarity search with a
     * short-lived pg pool query (works around an "Illegal invocation" error
     * in the default TypeORM connection), and returns a retriever or the
     * vector store, depending on the selected output.
     *
     * @param nodeData - node configuration (inputs, outputs, credential id)
     * @param _ - unused flow input string
     * @param options - runtime options used to resolve the stored credential
     * @throws Error when the Additional Configuration input is invalid JSON
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const user = getCredentialParam('user', credentialData, nodeData)
        const password = getCredentialParam('password', credentialData, nodeData)
        const _tableName = nodeData.inputs?.tableName as string
        const tableName = _tableName ? _tableName : 'documents'
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const additionalConfig = nodeData.inputs?.additionalConfig as string
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const sslConnection = nodeData.inputs?.sslConnection as boolean

        let additionalConfiguration = {}
        if (additionalConfig) {
            try {
                additionalConfiguration = typeof additionalConfig === 'object' ? additionalConfig : JSON.parse(additionalConfig)
            } catch (exception) {
                throw new Error('Invalid JSON in the Additional Configuration: ' + exception)
            }
        }

        const postgresConnectionOptions = {
            ...additionalConfiguration,
            type: 'postgres',
            host: nodeData.inputs?.host as string,
            port: nodeData.inputs?.port as number,
            username: user,
            password: password,
            database: nodeData.inputs?.database as string,
            ssl: sslConnection
        }
        const args = {
            postgresConnectionOptions: postgresConnectionOptions as DataSourceOptions,
            tableName: tableName
        }
        const vectorStore = await TypeORMVectorStore.fromDataSource(embeddings, args)

        // Rewrite the method to use a pg pool connection instead of the default
        // connection; otherwise the chain fails at runtime with:
        // [ERROR]: uncaughtException: Illegal invocation TypeError: Illegal invocation at Socket.ref (node:net:1524:18) ...
        vectorStore.similaritySearchVectorWithScore = async (query: number[], k: number, filter?: any) => {
            const embeddingString = `[${query.join(',')}]`
            const _filter = filter ?? '{}'
            const queryString = `
                SELECT *, embedding <=> $1 as "_distance"
                FROM ${tableName}
                WHERE metadata @> $2
                ORDER BY "_distance" ASC
                LIMIT $3;`
            const poolOptions = {
                host: postgresConnectionOptions.host,
                port: postgresConnectionOptions.port,
                user: postgresConnectionOptions.username,
                password: postgresConnectionOptions.password,
                database: postgresConnectionOptions.database
            }
            const pool = new Pool(poolOptions)
            const conn = await pool.connect()
            try {
                const documents = await conn.query(queryString, [embeddingString, _filter, k])
                const results = [] as [TypeORMVectorStoreDocument, number][]
                for (const doc of documents.rows) {
                    if (doc._distance != null && doc.pageContent != null) {
                        const document = new Document(doc) as TypeORMVectorStoreDocument
                        document.id = doc.id
                        results.push([document, doc._distance])
                    }
                }
                return results
            } finally {
                // Always return the client and close the pool: the previous
                // version leaked the client when the query threw and never
                // closed the per-call pool, leaking sockets on every search.
                conn.release()
                await pool.end()
            }
        }

        if (output === 'retriever') {
            return vectorStore.asRetriever(k)
        } else if (output === 'vectorStore') {
            // Stash k on the store so downstream consumers can honour top-k
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
module.exports = { nodeClass: Postgres_Existing_VectorStores }

View File

@ -1,218 +0,0 @@
import { DataSourceOptions } from 'typeorm'
import { flatten } from 'lodash'
import { Pool } from 'pg'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { TypeORMVectorStore, TypeORMVectorStoreDocument } from '@langchain/community/vectorstores/typeorm'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Flowise node (marked DEPRECATING): upserts documents into a pgvector table
 * on Postgres and exposes it as a retriever or as the vector store itself.
 */
class PostgresUpsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    // Declarative node configuration: labels, credential requirement, input
    // fields, and the two selectable outputs shown in the Flowise UI.
    constructor() {
        this.label = 'Postgres Upsert Document'
        this.name = 'postgresUpsert'
        this.version = 2.0
        this.type = 'Postgres'
        this.icon = 'postgres.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to Postgres using pgvector'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        // Flagged for deprecation in favour of the consolidated Postgres node
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['PostgresApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Host',
                name: 'host',
                type: 'string'
            },
            {
                label: 'Database',
                name: 'database',
                type: 'string'
            },
            {
                label: 'SSL Connection',
                name: 'sslConnection',
                type: 'boolean',
                default: false,
                optional: false
            },
            {
                label: 'Port',
                name: 'port',
                type: 'number',
                placeholder: '6432',
                optional: true
            },
            {
                label: 'Table Name',
                name: 'tableName',
                type: 'string',
                placeholder: 'documents',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Additional Configuration',
                name: 'additionalConfig',
                type: 'json',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Postgres Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Postgres Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(TypeORMVectorStore)]
            }
        ]
    }

    /**
     * Upserts the connected documents into the configured pgvector table,
     * replaces similarity search with a short-lived pg pool query (works
     * around an "Illegal invocation" error in the default TypeORM
     * connection), and returns a retriever or the vector store.
     *
     * @param nodeData - node configuration (inputs, outputs, credential id)
     * @param _ - unused flow input string
     * @param options - runtime options used to resolve the stored credential
     * @throws Error when the Additional Configuration input is invalid JSON
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const user = getCredentialParam('user', credentialData, nodeData)
        const password = getCredentialParam('password', credentialData, nodeData)
        const _tableName = nodeData.inputs?.tableName as string
        const tableName = _tableName ? _tableName : 'documents'
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const additionalConfig = nodeData.inputs?.additionalConfig as string
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const sslConnection = nodeData.inputs?.sslConnection as boolean

        let additionalConfiguration = {}
        if (additionalConfig) {
            try {
                additionalConfiguration = typeof additionalConfig === 'object' ? additionalConfig : JSON.parse(additionalConfig)
            } catch (exception) {
                throw new Error('Invalid JSON in the Additional Configuration: ' + exception)
            }
        }

        const postgresConnectionOptions = {
            ...additionalConfiguration,
            type: 'postgres',
            host: nodeData.inputs?.host as string,
            port: nodeData.inputs?.port as number,
            username: user,
            password: password,
            database: nodeData.inputs?.database as string,
            ssl: sslConnection
        }
        const args = {
            postgresConnectionOptions: postgresConnectionOptions as DataSourceOptions,
            tableName: tableName
        }

        // Upstream nodes may supply nested document arrays; flatten, then keep
        // only entries that actually have page content before upserting.
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }
        const vectorStore = await TypeORMVectorStore.fromDocuments(finalDocs, embeddings, args)

        // Rewrite the method to use a pg pool connection instead of the default
        // connection; otherwise the chain fails at runtime with:
        // [ERROR]: uncaughtException: Illegal invocation TypeError: Illegal invocation at Socket.ref (node:net:1524:18) ...
        vectorStore.similaritySearchVectorWithScore = async (query: number[], k: number, filter?: any) => {
            const embeddingString = `[${query.join(',')}]`
            const _filter = filter ?? '{}'
            const queryString = `
                SELECT *, embedding <=> $1 as "_distance"
                FROM ${tableName}
                WHERE metadata @> $2
                ORDER BY "_distance" ASC
                LIMIT $3;`
            const poolOptions = {
                host: postgresConnectionOptions.host,
                port: postgresConnectionOptions.port,
                user: postgresConnectionOptions.username,
                password: postgresConnectionOptions.password,
                database: postgresConnectionOptions.database
            }
            const pool = new Pool(poolOptions)
            const conn = await pool.connect()
            try {
                const documents = await conn.query(queryString, [embeddingString, _filter, k])
                const results = [] as [TypeORMVectorStoreDocument, number][]
                for (const doc of documents.rows) {
                    if (doc._distance != null && doc.pageContent != null) {
                        const document = new Document(doc) as TypeORMVectorStoreDocument
                        document.id = doc.id
                        results.push([document, doc._distance])
                    }
                }
                return results
            } finally {
                // Always return the client and close the pool: the previous
                // version leaked the client when the query threw and never
                // closed the per-call pool, leaking sockets on every search.
                conn.release()
                await pool.end()
            }
        }

        if (output === 'retriever') {
            return vectorStore.asRetriever(k)
        } else if (output === 'vectorStore') {
            // Stash k on the store so downstream consumers can honour top-k
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
module.exports = { nodeClass: PostgresUpsert_VectorStores }

View File

@ -39,7 +39,6 @@ class Qdrant_VectorStores implements INode {
this.description =
'Upsert embedded data and perform similarity search upon query using Qdrant, a scalable open source vector database written in Rust'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,194 +0,0 @@
import { QdrantClient } from '@qdrant/js-client-rest'
import { QdrantVectorStore, QdrantLibArgs } from '@langchain/community/vectorstores/qdrant'
import { Embeddings } from '@langchain/core/embeddings'
import { VectorStoreRetrieverInput } from '@langchain/core/vectorstores'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
type RetrieverConfig = Partial<VectorStoreRetrieverInput<QdrantVectorStore>>
/**
 * Flowise node (marked DEPRECATING): loads an existing Qdrant collection and
 * exposes it as a retriever or as the vector store itself.
 */
class Qdrant_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    // Declarative node configuration: labels, credential requirement, input
    // fields, and the two selectable outputs shown in the Flowise UI.
    constructor() {
        this.label = 'Qdrant Load Existing Index'
        this.name = 'qdrantExistingIndex'
        this.version = 2.0
        this.type = 'Qdrant'
        this.icon = 'qdrant.png'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Qdrant (i.e., documents have been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        // Flagged for deprecation in favour of the consolidated Qdrant node
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            description: 'Only needed when using Qdrant cloud hosted',
            optional: true,
            credentialNames: ['qdrantApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Qdrant Server URL',
                name: 'qdrantServerUrl',
                type: 'string',
                placeholder: 'http://localhost:6333'
            },
            {
                label: 'Qdrant Collection Name',
                name: 'qdrantCollection',
                type: 'string'
            },
            {
                label: 'Vector Dimension',
                name: 'qdrantVectorDimension',
                type: 'number',
                default: 1536,
                additionalParams: true
            },
            {
                label: 'Similarity',
                name: 'qdrantSimilarity',
                description: 'Similarity measure used in Qdrant.',
                type: 'options',
                default: 'Cosine',
                options: [
                    {
                        label: 'Cosine',
                        name: 'Cosine'
                    },
                    {
                        label: 'Euclid',
                        name: 'Euclid'
                    },
                    {
                        label: 'Dot',
                        name: 'Dot'
                    }
                ],
                additionalParams: true
            },
            {
                // Fixed label typo ('Cofiguration' -> 'Configuration')
                label: 'Additional Collection Configuration',
                name: 'qdrantCollectionConfiguration',
                description:
                    'Refer to <a target="_blank" href="https://qdrant.tech/documentation/concepts/collections">collection docs</a> for more reference',
                type: 'json',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Qdrant Search Filter',
                name: 'qdrantFilter',
                description: 'Only return points which satisfy the conditions',
                type: 'json',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Qdrant Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Qdrant Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(QdrantVectorStore)]
            }
        ]
    }

    /**
     * Connects to an existing Qdrant collection and returns a retriever
     * (with optional search filter) or the vector store, depending on the
     * selected output.
     *
     * @param nodeData - node configuration (inputs, outputs, credential id)
     * @param _ - unused flow input string
     * @param options - runtime options used to resolve the stored credential
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const qdrantServerUrl = nodeData.inputs?.qdrantServerUrl as string
        const collectionName = nodeData.inputs?.qdrantCollection as string
        let qdrantCollectionConfiguration = nodeData.inputs?.qdrantCollectionConfiguration
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const qdrantSimilarity = nodeData.inputs?.qdrantSimilarity
        const qdrantVectorDimension = nodeData.inputs?.qdrantVectorDimension
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        let queryFilter = nodeData.inputs?.qdrantFilter
        const k = topK ? parseFloat(topK) : 4

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const qdrantApiKey = getCredentialParam('qdrantApiKey', credentialData, nodeData)

        const client = new QdrantClient({
            url: qdrantServerUrl,
            apiKey: qdrantApiKey
        })

        const dbConfig: QdrantLibArgs = {
            client,
            collectionName
        }
        const retrieverConfig: RetrieverConfig = {
            k
        }
        if (qdrantCollectionConfiguration) {
            // Accept either an already-parsed object or a JSON string, then
            // overlay the vector size/distance chosen on this node
            qdrantCollectionConfiguration =
                typeof qdrantCollectionConfiguration === 'object'
                    ? qdrantCollectionConfiguration
                    : JSON.parse(qdrantCollectionConfiguration)
            dbConfig.collectionConfig = {
                ...qdrantCollectionConfiguration,
                vectors: {
                    ...qdrantCollectionConfiguration.vectors,
                    size: qdrantVectorDimension ? parseInt(qdrantVectorDimension, 10) : 1536,
                    distance: qdrantSimilarity ?? 'Cosine'
                }
            }
        }
        if (queryFilter) {
            retrieverConfig.filter = typeof queryFilter === 'object' ? queryFilter : JSON.parse(queryFilter)
        }

        const vectorStore = await QdrantVectorStore.fromExistingCollection(embeddings, dbConfig)

        if (output === 'retriever') {
            return vectorStore.asRetriever(retrieverConfig)
        } else if (output === 'vectorStore') {
            // Stash k (and the filter, when given) on the store so downstream
            // consumers can honour them
            ;(vectorStore as any).k = k
            if (queryFilter) {
                ;(vectorStore as any).filter = retrieverConfig.filter
            }
            return vectorStore
        }
        return vectorStore
    }
}
module.exports = { nodeClass: Qdrant_Existing_VectorStores }

View File

@ -1,213 +0,0 @@
import { QdrantClient } from '@qdrant/js-client-rest'
import { QdrantVectorStore, QdrantLibArgs } from '@langchain/community/vectorstores/qdrant'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { flatten } from 'lodash'
import { VectorStoreRetrieverInput } from '@langchain/core/vectorstores'
type RetrieverConfig = Partial<VectorStoreRetrieverInput<QdrantVectorStore>>
/**
 * Flowise node (marked DEPRECATING): upserts documents into a Qdrant
 * collection (optionally in batches) and exposes the result as a retriever
 * or as the vector store itself.
 */
class QdrantUpsert_VectorStores implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
badge: string
baseClasses: string[]
inputs: INodeParams[]
credential: INodeParams
outputs: INodeOutputsValue[]
// Declarative node configuration: labels, credential requirement, input
// fields, and the two selectable outputs shown in the Flowise UI.
constructor() {
this.label = 'Qdrant Upsert Document'
this.name = 'qdrantUpsert'
this.version = 3.0
this.type = 'Qdrant'
this.icon = 'qdrant.png'
this.category = 'Vector Stores'
this.description = 'Upsert documents to Qdrant'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
// Flagged for deprecation in favour of the consolidated Qdrant node
this.badge = 'DEPRECATING'
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
description: 'Only needed when using Qdrant cloud hosted',
optional: true,
credentialNames: ['qdrantApi']
}
this.inputs = [
{
label: 'Document',
name: 'document',
type: 'Document',
list: true
},
{
label: 'Embeddings',
name: 'embeddings',
type: 'Embeddings'
},
{
label: 'Qdrant Server URL',
name: 'qdrantServerUrl',
type: 'string',
placeholder: 'http://localhost:6333'
},
{
label: 'Qdrant Collection Name',
name: 'qdrantCollection',
type: 'string'
},
{
label: 'Vector Dimension',
name: 'qdrantVectorDimension',
type: 'number',
default: 1536,
additionalParams: true
},
{
label: 'Upsert Batch Size',
name: 'batchSize',
type: 'number',
step: 1,
description: 'Upsert in batches of size N',
additionalParams: true,
optional: true
},
{
label: 'Similarity',
name: 'qdrantSimilarity',
description: 'Similarity measure used in Qdrant.',
type: 'options',
default: 'Cosine',
options: [
{
label: 'Cosine',
name: 'Cosine'
},
{
label: 'Euclid',
name: 'Euclid'
},
{
label: 'Dot',
name: 'Dot'
}
],
additionalParams: true
},
{
label: 'Top K',
name: 'topK',
description: 'Number of top results to fetch. Default to 4',
placeholder: '4',
type: 'number',
additionalParams: true,
optional: true
},
{
label: 'Qdrant Search Filter',
name: 'qdrantFilter',
description: 'Only return points which satisfy the conditions',
type: 'json',
additionalParams: true,
optional: true
}
]
this.outputs = [
{
label: 'Qdrant Retriever',
name: 'retriever',
baseClasses: this.baseClasses
},
{
label: 'Qdrant Vector Store',
name: 'vectorStore',
baseClasses: [this.type, ...getBaseClasses(QdrantVectorStore)]
}
]
}
/**
 * Upserts the connected documents into the configured Qdrant collection,
 * optionally in batches of 'batchSize', and returns a retriever (with
 * optional search filter) or the vector store, per the selected output.
 *
 * @param nodeData - node configuration (inputs, outputs, credential id)
 * @param _ - unused flow input string
 * @param options - runtime options used to resolve the stored credential
 * @throws Error when no documents were upserted (vector store undefined)
 */
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const qdrantServerUrl = nodeData.inputs?.qdrantServerUrl as string
const collectionName = nodeData.inputs?.qdrantCollection as string
const docs = nodeData.inputs?.document as Document[]
const embeddings = nodeData.inputs?.embeddings as Embeddings
const qdrantSimilarity = nodeData.inputs?.qdrantSimilarity
const qdrantVectorDimension = nodeData.inputs?.qdrantVectorDimension
const _batchSize = nodeData.inputs?.batchSize
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
let queryFilter = nodeData.inputs?.qdrantFilter
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const qdrantApiKey = getCredentialParam('qdrantApiKey', credentialData, nodeData)
const client = new QdrantClient({
url: qdrantServerUrl,
apiKey: qdrantApiKey
})
// Upstream nodes may supply nested document arrays; flatten, then keep
// only entries that actually have page content before upserting.
const flattenDocs = docs && docs.length ? flatten(docs) : []
const finalDocs = []
for (let i = 0; i < flattenDocs.length; i += 1) {
if (flattenDocs[i] && flattenDocs[i].pageContent) {
finalDocs.push(new Document(flattenDocs[i]))
}
}
const dbConfig: QdrantLibArgs = {
client,
url: qdrantServerUrl,
collectionName,
collectionConfig: {
vectors: {
size: qdrantVectorDimension ? parseInt(qdrantVectorDimension, 10) : 1536,
distance: qdrantSimilarity ?? 'Cosine'
}
}
}
const retrieverConfig: RetrieverConfig = {
k
}
if (queryFilter) {
retrieverConfig.filter = typeof queryFilter === 'object' ? queryFilter : JSON.parse(queryFilter)
}
let vectorStore: QdrantVectorStore | undefined = undefined
if (_batchSize) {
// Batched upsert: each fromDocuments call adds one slice into the same
// collection; the last returned store instance is kept for querying
const batchSize = parseInt(_batchSize, 10)
for (let i = 0; i < finalDocs.length; i += batchSize) {
const batch = finalDocs.slice(i, i + batchSize)
vectorStore = await QdrantVectorStore.fromDocuments(batch, embeddings, dbConfig)
}
} else {
vectorStore = await QdrantVectorStore.fromDocuments(finalDocs, embeddings, dbConfig)
}
if (vectorStore === undefined) {
throw new Error('No documents to upsert')
} else {
if (output === 'retriever') {
const retriever = vectorStore.asRetriever(retrieverConfig)
return retriever
} else if (output === 'vectorStore') {
// Stash k on the store so downstream consumers can honour top-k
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
}
}
}
module.exports = { nodeClass: QdrantUpsert_VectorStores }

View File

@ -52,7 +52,6 @@ class Redis_VectorStores implements INode {
this.icon = 'redis.svg'
this.category = 'Vector Stores'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,237 +0,0 @@
import { createClient, SearchOptions, RedisClientOptions } from 'redis'
import { isEqual } from 'lodash'
import { Embeddings } from '@langchain/core/embeddings'
import { VectorStore } from '@langchain/core/vectorstores'
import { Document } from '@langchain/core/documents'
import { RedisVectorStore } from '@langchain/community/vectorstores/redis'
import { escapeSpecialChars, unEscapeSpecialChars } from './utils'
import {
getBaseClasses,
getCredentialData,
getCredentialParam,
ICommonObject,
INodeData,
INodeOutputsValue,
INodeParams
} from '../../../src'
let redisClientSingleton: ReturnType<typeof createClient>
let redisClientOption: RedisClientOptions
/**
 * Returns a shared Redis client, creating one on first use and reconnecting
 * only when the connection options differ from those used previously.
 *
 * @param option - redis client options to connect with
 * @returns a connected redis client (module-level singleton)
 */
const getRedisClient = async (option: RedisClientOptions) => {
    if (!redisClientSingleton) {
        // No client yet: create, connect, and remember the options used
        redisClientSingleton = createClient(option)
        await redisClientSingleton.connect()
        redisClientOption = option
        return redisClientSingleton
    } else if (redisClientSingleton && !isEqual(option, redisClientOption)) {
        // Options changed: close the stale client before reconnecting.
        // Awaiting quit() fixes the floating promise in the original, which
        // risked an unhandled rejection and a race where the old connection
        // was still closing while the new one connected.
        await redisClientSingleton.quit()
        redisClientSingleton = createClient(option)
        await redisClientSingleton.connect()
        redisClientOption = option
        return redisClientSingleton
    }
    // Same options as before: reuse the existing client
    return redisClientSingleton
}
/**
 * Shared base (marked DEPRECATING) for the Redis vector-store nodes.
 * Declares the common inputs/outputs and implements init(): it connects a
 * shared Redis client, builds the store via the subclass's
 * constructVectorStore(), and overrides similarity search with a raw
 * FT.SEARCH KNN query.
 */
export abstract class RedisSearchBase {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
badge: string
baseClasses: string[]
inputs: INodeParams[]
credential: INodeParams
outputs: INodeOutputsValue[]
// Shared Redis client obtained from the module-level singleton
redisClient: ReturnType<typeof createClient>
// Declarative node configuration common to all Redis vector-store nodes
protected constructor() {
this.type = 'Redis'
this.icon = 'redis.svg'
this.category = 'Vector Stores'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
// Flagged for deprecation in favour of the consolidated Redis node
this.badge = 'DEPRECATING'
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['redisCacheUrlApi', 'redisCacheApi']
}
this.inputs = [
{
label: 'Embeddings',
name: 'embeddings',
type: 'Embeddings'
},
{
label: 'Index Name',
name: 'indexName',
placeholder: '<VECTOR_INDEX_NAME>',
type: 'string'
},
{
label: 'Replace Index?',
name: 'replaceIndex',
description: 'Selecting this option will delete the existing index and recreate a new one',
default: false,
type: 'boolean'
},
{
label: 'Content Field',
name: 'contentKey',
description: 'Name of the field (column) that contains the actual content',
type: 'string',
default: 'content',
additionalParams: true,
optional: true
},
{
label: 'Metadata Field',
name: 'metadataKey',
description: 'Name of the field (column) that contains the metadata of the document',
type: 'string',
default: 'metadata',
additionalParams: true,
optional: true
},
{
label: 'Vector Field',
name: 'vectorKey',
description: 'Name of the field (column) that contains the vector',
type: 'string',
default: 'content_vector',
additionalParams: true,
optional: true
},
{
label: 'Top K',
name: 'topK',
description: 'Number of top results to fetch. Default to 4',
placeholder: '4',
type: 'number',
additionalParams: true,
optional: true
}
]
this.outputs = [
{
label: 'Redis Retriever',
name: 'retriever',
baseClasses: this.baseClasses
},
{
label: 'Redis Vector Store',
name: 'vectorStore',
baseClasses: [this.type, ...getBaseClasses(RedisVectorStore)]
}
]
}
// Subclasses build the concrete store (load existing vs upsert documents)
abstract constructVectorStore(
embeddings: Embeddings,
indexName: string,
replaceIndex: boolean,
docs: Document<Record<string, any>>[] | undefined
): Promise<VectorStore>
/**
 * Connects to Redis (URL credential, or user/password/host/port assembled
 * into a redis:// URL), builds the vector store via the subclass, patches
 * similarity search to issue a raw FT.SEARCH KNN query, and returns a
 * retriever or the vector store, per the selected output.
 *
 * @param nodeData - node configuration (inputs, outputs, credential id)
 * @param _ - unused flow input string
 * @param options - runtime options used to resolve the stored credential
 * @param docs - documents to upsert (undefined when loading an existing index)
 */
async init(nodeData: INodeData, _: string, options: ICommonObject, docs: Document<Record<string, any>>[] | undefined): Promise<any> {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const indexName = nodeData.inputs?.indexName as string
let contentKey = nodeData.inputs?.contentKey as string
let metadataKey = nodeData.inputs?.metadataKey as string
let vectorKey = nodeData.inputs?.vectorKey as string
const embeddings = nodeData.inputs?.embeddings as Embeddings
const topK = nodeData.inputs?.topK as string
const replaceIndex = nodeData.inputs?.replaceIndex as boolean
const k = topK ? parseFloat(topK) : 4
const output = nodeData.outputs?.output as string
let redisUrl = getCredentialParam('redisUrl', credentialData, nodeData)
if (!redisUrl || redisUrl === '') {
// No URL credential: assemble one from the discrete credential fields
const username = getCredentialParam('redisCacheUser', credentialData, nodeData)
const password = getCredentialParam('redisCachePwd', credentialData, nodeData)
const portStr = getCredentialParam('redisCachePort', credentialData, nodeData)
const host = getCredentialParam('redisCacheHost', credentialData, nodeData)
redisUrl = 'redis://' + username + ':' + password + '@' + host + ':' + portStr
}
this.redisClient = await getRedisClient({ url: redisUrl })
const vectorStore = await this.constructVectorStore(embeddings, indexName, replaceIndex, docs)
// Fall back to the documented defaults when the optional fields are blank
if (!contentKey || contentKey === '') contentKey = 'content'
if (!metadataKey || metadataKey === '') metadataKey = 'metadata'
if (!vectorKey || vectorKey === '') vectorKey = 'content_vector'
// Builds the FT.SEARCH query string and options for a KNN vector search,
// optionally pre-filtered on the metadata field
const buildQuery = (query: number[], k: number, filter?: string[]): [string, SearchOptions] => {
const vectorScoreField = 'vector_score'
let hybridFields = '*'
// if a filter is set, modify the hybrid query
if (filter && filter.length) {
// `filter` is a list of strings, then it's applied using the OR operator in the metadata key
hybridFields = `@${metadataKey}:(${filter.map(escapeSpecialChars).join('|')})`
}
const baseQuery = `${hybridFields} => [KNN ${k} @${vectorKey} $vector AS ${vectorScoreField}]`
const returnFields = [metadataKey, contentKey, vectorScoreField]
const options: SearchOptions = {
PARAMS: {
// FT.SEARCH expects the query vector as a little-endian float32 blob
vector: Buffer.from(new Float32Array(query).buffer)
},
RETURN: returnFields,
SORTBY: vectorScoreField,
DIALECT: 2,
LIMIT: {
from: 0,
size: k
}
}
return [baseQuery, options]
}
// Override similarity search to issue the raw FT.SEARCH query above and
// map hits back into (Document, score) pairs
vectorStore.similaritySearchVectorWithScore = async (
query: number[],
k: number,
filter?: string[]
): Promise<[Document, number][]> => {
const results = await this.redisClient.ft.search(indexName, ...buildQuery(query, k, filter))
const result: [Document, number][] = []
if (results.total) {
for (const res of results.documents) {
if (res.value) {
const document = res.value
if (document.vector_score) {
// Metadata was escaped when stored; unescape before JSON.parse
const metadataString = unEscapeSpecialChars(document[metadataKey] as string)
result.push([
new Document({
pageContent: document[contentKey] as string,
metadata: JSON.parse(metadataString)
}),
Number(document.vector_score)
])
}
}
}
}
return result
}
if (output === 'retriever') {
return vectorStore.asRetriever(k)
} else if (output === 'vectorStore') {
// Stash k on the store so downstream consumers can honour top-k
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
}
}

View File

@ -1,41 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { VectorStore } from '@langchain/core/vectorstores'
import { RedisVectorStore, RedisVectorStoreConfig } from '@langchain/community/vectorstores/redis'
import { Document } from '@langchain/core/documents'
import { ICommonObject, INode, INodeData } from '../../../src/Interface'
import { RedisSearchBase } from './RedisSearchBase'
/**
 * Node that attaches to an index previously created in Redis.
 * All heavy lifting lives in RedisSearchBase; this subclass only adjusts
 * the node metadata and skips the upsert step.
 */
class RedisExisting_VectorStores extends RedisSearchBase implements INode {
    constructor() {
        super()
        this.label = 'Redis Load Existing Index'
        this.name = 'RedisIndex'
        this.version = 1.0
        this.description = 'Load existing index from Redis (i.e: Document has been upserted)'

        // 'replaceIndex' only makes sense when writing; strip it from the inherited inputs.
        const replaceIdx = this.inputs.findIndex((param) => param.name === 'replaceIndex')
        if (replaceIdx !== -1) this.inputs.splice(replaceIdx, 1)
    }

    /**
     * Wrap the already-populated Redis index; nothing is written here.
     */
    async constructVectorStore(
        embeddings: Embeddings,
        indexName: string,
        // eslint-disable-next-line unused-imports/no-unused-vars
        replaceIndex: boolean,
        _: Document<Record<string, any>>[]
    ): Promise<VectorStore> {
        const storeConfig: RedisVectorStoreConfig = {
            redisClient: this.redisClient,
            indexName
        }
        return new RedisVectorStore(embeddings, storeConfig)
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        // No documents to upsert when loading an existing index.
        return super.init(nodeData, _, options, undefined)
    }
}

module.exports = { nodeClass: RedisExisting_VectorStores }

View File

@ -1,62 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { flatten } from 'lodash'
import { VectorStore } from '@langchain/core/vectorstores'
import { RedisVectorStore, RedisVectorStoreConfig } from '@langchain/community/vectorstores/redis'
import { RedisSearchBase } from './RedisSearchBase'
import { ICommonObject, INode, INodeData } from '../../../src/Interface'
import { escapeAllStrings } from './utils'
/**
 * Node that upserts documents into a Redis vector index.
 * Extends RedisSearchBase with a Document input and the write-side
 * constructVectorStore() implementation.
 */
class RedisUpsert_VectorStores extends RedisSearchBase implements INode {
    constructor() {
        super()
        this.label = 'Redis Upsert Document'
        this.name = 'RedisUpsert'
        this.version = 1.0
        this.description = 'Upsert documents to Redis'
        // Prepend the Document input so it appears first in the node UI.
        this.inputs.unshift({
            label: 'Document',
            name: 'document',
            type: 'Document',
            list: true
        })
    }

    /**
     * Build (or rebuild) the Redis index and upsert the given documents.
     * When replaceIndex is set, any existing index is dropped first.
     */
    async constructVectorStore(
        embeddings: Embeddings,
        indexName: string,
        replaceIndex: boolean,
        docs: Document<Record<string, any>>[]
    ): Promise<VectorStore> {
        const storeConfig: RedisVectorStoreConfig = {
            redisClient: this.redisClient,
            indexName: indexName
        }
        if (replaceIndex) {
            try {
                const response = await this.redisClient.ft.dropIndex(indexName)
                if (process.env.DEBUG === 'true') {
                    // eslint-disable-next-line no-console
                    console.log(`Redis Vector Store :: Dropping index [${indexName}], Received Response [${response}]`)
                }
            } catch (error) {
                // Fix: dropping a non-existent index throws ("Unknown Index name")
                // on the very first upsert with replaceIndex enabled; that is not
                // fatal, so log (in debug mode) and continue with the upsert.
                if (process.env.DEBUG === 'true') {
                    // eslint-disable-next-line no-console
                    console.log(`Redis Vector Store :: Failed to drop index [${indexName}]`, error)
                }
            }
        }
        return await RedisVectorStore.fromDocuments(docs, embeddings, storeConfig)
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const docs = nodeData.inputs?.document as Document[]
        // Inputs may be nested arrays of documents; flatten to a single list.
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                const document = new Document(flattenDocs[i])
                // RediSearch query syntax is sensitive to special characters;
                // escape every string value in the metadata before storing.
                escapeAllStrings(document.metadata)
                finalDocs.push(document)
            }
        }
        return super.init(nodeData, _, options, finalDocs)
    }
}

module.exports = { nodeClass: RedisUpsert_VectorStores }

View File

@ -29,7 +29,6 @@ class SingleStore_VectorStores implements INode {
this.description =
'Upsert embedded data and perform similarity search upon query using SingleStore, a fast and distributed cloud relational database'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,148 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { SingleStoreVectorStore, SingleStoreVectorStoreConfig } from '@langchain/community/vectorstores/singlestore'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Node that connects to an existing SingleStore table and exposes it either
 * as a retriever or as a raw vector store, depending on the selected output.
 */
class SingleStoreExisting_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'SingleStore Load Existing Table'
        this.name = 'singlestoreExisting'
        this.version = 1.0
        this.type = 'SingleStore'
        this.icon = 'singlestore.svg'
        this.category = 'Vector Stores'
        this.description = 'Load existing document from SingleStore'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            description: 'Needed when using SingleStore cloud hosted',
            optional: true,
            credentialNames: ['singleStoreApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Host',
                name: 'host',
                type: 'string'
            },
            {
                label: 'Database',
                name: 'database',
                type: 'string'
            },
            {
                label: 'Table Name',
                name: 'tableName',
                type: 'string',
                placeholder: 'embeddings',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Content Column Name',
                name: 'contentColumnName',
                type: 'string',
                placeholder: 'content',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Vector Column Name',
                name: 'vectorColumnName',
                type: 'string',
                placeholder: 'vector',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Metadata Column Name',
                name: 'metadataColumnName',
                type: 'string',
                placeholder: 'metadata',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'SingleStore Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'SingleStore Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(SingleStoreVectorStore)]
            }
        ]
    }

    /**
     * Resolve credentials, build the SingleStore connection config and return
     * the requested output (retriever or vector store).
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const user = getCredentialParam('user', credentialData, nodeData)
        const password = getCredentialParam('password', credentialData, nodeData)
        // Only include the optional table/column overrides when set, so the
        // SingleStore client falls back to its own defaults otherwise.
        const singleStoreConnectionConfig = {
            connectionOptions: {
                host: nodeData.inputs?.host as string,
                port: 3306, // NOTE(review): port is hard-coded — confirm non-default ports are not needed
                user,
                password,
                database: nodeData.inputs?.database as string
            },
            ...(nodeData.inputs?.tableName ? { tableName: nodeData.inputs.tableName as string } : {}),
            ...(nodeData.inputs?.contentColumnName ? { contentColumnName: nodeData.inputs.contentColumnName as string } : {}),
            ...(nodeData.inputs?.vectorColumnName ? { vectorColumnName: nodeData.inputs.vectorColumnName as string } : {}),
            ...(nodeData.inputs?.metadataColumnName ? { metadataColumnName: nodeData.inputs.metadataColumnName as string } : {})
        } as SingleStoreVectorStoreConfig
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        // Idiom fix: declare-and-initialise in one step instead of `let` + assignment.
        const vectorStore = new SingleStoreVectorStore(embeddings, singleStoreConnectionConfig)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}

module.exports = { nodeClass: SingleStoreExisting_VectorStores }

View File

@ -1,166 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { SingleStoreVectorStore, SingleStoreVectorStoreConfig } from '@langchain/community/vectorstores/singlestore'
import { flatten } from 'lodash'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Node that upserts documents into a SingleStore table and exposes the
 * resulting store as a retriever or raw vector store.
 */
class SingleStoreUpsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'SingleStore Upsert Document'
        this.name = 'singlestoreUpsert'
        this.version = 1.0
        this.type = 'SingleStore'
        this.icon = 'singlestore.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to SingleStore'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            description: 'Needed when using SingleStore cloud hosted',
            optional: true,
            credentialNames: ['singleStoreApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Host',
                name: 'host',
                type: 'string'
            },
            {
                label: 'Database',
                name: 'database',
                type: 'string'
            },
            {
                label: 'Table Name',
                name: 'tableName',
                type: 'string',
                placeholder: 'embeddings',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Content Column Name',
                name: 'contentColumnName',
                type: 'string',
                placeholder: 'content',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Vector Column Name',
                name: 'vectorColumnName',
                type: 'string',
                placeholder: 'vector',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Metadata Column Name',
                name: 'metadataColumnName',
                type: 'string',
                placeholder: 'metadata',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'SingleStore Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'SingleStore Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(SingleStoreVectorStore)]
            }
        ]
    }

    /**
     * Flatten the incoming documents, upsert them into SingleStore, and
     * return the requested output (retriever or vector store).
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const user = getCredentialParam('user', credentialData, nodeData)
        const password = getCredentialParam('password', credentialData, nodeData)
        // Only include the optional table/column overrides when set, so the
        // SingleStore client falls back to its own defaults otherwise.
        const singleStoreConnectionConfig = {
            connectionOptions: {
                host: nodeData.inputs?.host as string,
                port: 3306,
                user,
                password,
                database: nodeData.inputs?.database as string
            },
            ...(nodeData.inputs?.tableName ? { tableName: nodeData.inputs.tableName as string } : {}),
            ...(nodeData.inputs?.contentColumnName ? { contentColumnName: nodeData.inputs.contentColumnName as string } : {}),
            ...(nodeData.inputs?.vectorColumnName ? { vectorColumnName: nodeData.inputs.vectorColumnName as string } : {}),
            ...(nodeData.inputs?.metadataColumnName ? { metadataColumnName: nodeData.inputs.metadataColumnName as string } : {})
        } as SingleStoreVectorStoreConfig
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }
        const vectorStore = new SingleStoreVectorStore(embeddings, singleStoreConnectionConfig)
        // Fix: the upsert was previously fired without `await` (via a redundant
        // `.bind(vectorStore)(...)` call), so the node could return before the
        // write finished and any insert error was silently dropped.
        await vectorStore.addDocuments(finalDocs)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}

module.exports = { nodeClass: SingleStoreUpsert_VectorStores }

View File

@ -32,7 +32,6 @@ class Supabase_VectorStores implements INode {
this.category = 'Vector Stores'
this.description = 'Upsert embedded data and perform similarity or mmr search upon query using Supabase via pgvector extension'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,131 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { SupabaseLibArgs, SupabaseVectorStore } from '@langchain/community/vectorstores/supabase'
import { createClient } from '@supabase/supabase-js'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Node that loads an already-populated Supabase (pgvector) index and exposes
 * it as a retriever or as a raw vector store.
 */
class Supabase_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Supabase Load Existing Index'
        this.name = 'supabaseExistingIndex'
        this.version = 1.0
        this.type = 'Supabase'
        this.icon = 'supabase.svg'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Supabase (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['supabaseApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Supabase Project URL',
                name: 'supabaseProjUrl',
                type: 'string'
            },
            {
                label: 'Table Name',
                name: 'tableName',
                type: 'string'
            },
            {
                label: 'Query Name',
                name: 'queryName',
                type: 'string'
            },
            {
                label: 'Supabase Metadata Filter',
                name: 'supabaseMetadataFilter',
                type: 'json',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Supabase Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Supabase Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(SupabaseVectorStore)]
            }
        ]
    }

    /**
     * Resolve the Supabase credential, attach to the existing index, and
     * return the requested output (retriever or vector store).
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const supabaseApiKey = getCredentialParam('supabaseApiKey', credentialData, nodeData)
        const supabaseProjUrl = nodeData.inputs?.supabaseProjUrl as string
        const tableName = nodeData.inputs?.tableName as string
        const queryName = nodeData.inputs?.queryName as string
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const supabaseMetadataFilter = nodeData.inputs?.supabaseMetadataFilter
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4

        const client = createClient(supabaseProjUrl, supabaseApiKey)
        const obj: SupabaseLibArgs = { client, tableName, queryName }
        if (supabaseMetadataFilter) {
            // The filter may arrive already parsed (object) or as a JSON string.
            obj.filter = typeof supabaseMetadataFilter === 'object' ? supabaseMetadataFilter : JSON.parse(supabaseMetadataFilter)
        }

        const vectorStore = await SupabaseVectorStore.fromExistingIndex(embeddings, obj)

        if (output === 'retriever') {
            return vectorStore.asRetriever(k)
        }
        if (output === 'vectorStore') {
            ;(vectorStore as any).k = k
            if (supabaseMetadataFilter) {
                ;(vectorStore as any).filter = obj.filter
            }
            return vectorStore
        }
        return vectorStore
    }
}

module.exports = { nodeClass: Supabase_Existing_VectorStores }

View File

@ -1,157 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { SupabaseVectorStore } from '@langchain/community/vectorstores/supabase'
import { createClient } from '@supabase/supabase-js'
import { flatten } from 'lodash'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
/**
 * Node that upserts documents into a Supabase (pgvector) table and exposes
 * the store as a retriever or raw vector store.
 */
class SupabaseUpsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Supabase Upsert Document'
        this.name = 'supabaseUpsert'
        this.version = 1.0
        this.type = 'Supabase'
        this.icon = 'supabase.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to Supabase'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['supabaseApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Supabase Project URL',
                name: 'supabaseProjUrl',
                type: 'string'
            },
            {
                label: 'Table Name',
                name: 'tableName',
                type: 'string'
            },
            {
                label: 'Query Name',
                name: 'queryName',
                type: 'string'
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Supabase Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Supabase Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(SupabaseVectorStore)]
            }
        ]
    }

    /**
     * Flatten the incoming documents, upsert them via the id-stable
     * SupabaseUpsertVectorStore subclass, and return the requested output.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const supabaseProjUrl = nodeData.inputs?.supabaseProjUrl as string
        const tableName = nodeData.inputs?.tableName as string
        const queryName = nodeData.inputs?.queryName as string
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const supabaseApiKey = getCredentialParam('supabaseApiKey', credentialData, nodeData)
        const client = createClient(supabaseProjUrl, supabaseApiKey)
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            // Consistency/robustness fix: skip null entries and documents with no
            // pageContent, matching the guard used by the other upsert nodes.
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }
        const vectorStore = await SupabaseUpsertVectorStore.fromDocuments(finalDocs, embeddings, {
            client,
            tableName: tableName,
            queryName: queryName
        })
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
/**
 * SupabaseVectorStore variant that upserts rows under deterministic ids so
 * that re-running the same flow replaces previous rows instead of
 * duplicating them.
 */
class SupabaseUpsertVectorStore extends SupabaseVectorStore {
    /**
     * Insert-or-replace one row per vector, batched by upsertBatchSize.
     * Returns the ids reported back by Supabase.
     */
    async addVectors(vectors: number[][], documents: Document[]): Promise<string[]> {
        if (vectors.length === 0) {
            return []
        }
        const rows = vectors.map((embedding, idx) => ({
            content: documents[idx].pageContent,
            embedding,
            metadata: documents[idx].metadata
        }))
        let returnedIds: string[] = []
        for (let i = 0; i < rows.length; i += this.upsertBatchSize) {
            const chunk = rows.slice(i, i + this.upsertBatchSize).map((row, index) => {
                // Fix: use the row's absolute position (i + index) as its id.
                // The previous chunk-relative `index` restarted at 0 for every
                // batch, so each batch's upsert overwrote the rows of the one
                // before it, silently losing data beyond the first batch.
                return { id: i + index, ...row }
            })
            const res = await this.client.from(this.tableName).upsert(chunk).select()
            if (res.error) {
                throw new Error(`Error inserting: ${res.error.message} ${res.status} ${res.statusText}`)
            }
            if (res.data) {
                returnedIds = returnedIds.concat(res.data.map((row) => row.id))
            }
        }
        return returnedIds
    }
}
module.exports = { nodeClass: SupabaseUpsert_VectorStores }

View File

@ -36,7 +36,6 @@ class Upstash_VectorStores implements INode {
this.description =
'Upsert data as embedding or string and perform similarity search with Upstash, the leading serverless data platform'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -36,7 +36,6 @@ class Vectara_VectorStores implements INode {
this.category = 'Vector Stores'
this.description = 'Upsert embedded data and perform similarity search upon query using Vectara, a LLM-powered search-as-a-service'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,139 +0,0 @@
import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig } from '@langchain/community/vectorstores/vectara'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
/**
 * Node that loads an existing Vectara corpus and exposes it as a retriever
 * or as a raw vector store. All search tuning (filter, context sentences,
 * lambda) is passed through to the Vectara client.
 */
class VectaraExisting_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Vectara Load Existing Index'
        this.name = 'vectaraExistingIndex'
        this.version = 1.0
        this.type = 'Vectara'
        this.icon = 'vectara.png'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Vectara (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['vectaraApi']
        }
        this.inputs = [
            {
                label: 'Metadata Filter',
                name: 'filter',
                description:
                    'Filter to apply to Vectara metadata. Refer to the <a target="_blank" href="https://docs.flowiseai.com/vector-stores/vectara">documentation</a> on how to use Vectara filters with Flowise.',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Sentences Before',
                name: 'sentencesBefore',
                description: 'Number of sentences to fetch before the matched sentence. Defaults to 2.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Sentences After',
                name: 'sentencesAfter',
                description: 'Number of sentences to fetch after the matched sentence. Defaults to 2.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Lambda',
                name: 'lambda',
                description:
                    'Improves retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Defaults to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Vectara Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Vectara Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(VectaraStore)]
            }
        ]
    }

    /**
     * Resolve the Vectara credential, assemble filter/context options, and
     * return the requested output (retriever or vector store).
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
        const customerId = getCredentialParam('customerID', credentialData, nodeData)
        // A credential may reference several corpora as a comma-separated list.
        const corpusId = getCredentialParam('corpusID', credentialData, nodeData).split(',')
        const vectaraMetadataFilter = nodeData.inputs?.filter as string
        const sentencesBefore = nodeData.inputs?.sentencesBefore as number
        const sentencesAfter = nodeData.inputs?.sentencesAfter as number
        const lambda = nodeData.inputs?.lambda as number
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseInt(topK, 10) : 4
        const vectaraArgs: VectaraLibArgs = {
            apiKey: apiKey,
            customerId: customerId,
            corpusId: corpusId,
            source: 'flowise'
        }
        // NOTE(review): these truthiness checks ignore an explicit 0 — e.g.
        // lambda = 0 or sentencesBefore = 0 would be silently dropped and the
        // Vectara defaults used instead; confirm whether 0 should be honoured.
        const vectaraFilter: VectaraFilter = {}
        if (vectaraMetadataFilter) vectaraFilter.filter = vectaraMetadataFilter
        if (lambda) vectaraFilter.lambda = lambda
        const vectaraContextConfig: VectaraContextConfig = {}
        if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
        if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
        vectaraFilter.contextConfig = vectaraContextConfig
        const vectorStore = new VectaraStore(vectaraArgs)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k, vectaraFilter)
            return retriever
        } else if (output === 'vectorStore') {
            // Expose k (and the filter, if any) on the store for downstream nodes.
            ;(vectorStore as any).k = k
            if (vectaraMetadataFilter) {
                ;(vectorStore as any).filter = vectaraFilter.filter
            }
            return vectorStore
        }
        return vectorStore
    }
}
module.exports = { nodeClass: VectaraExisting_VectorStores }

View File

@ -1,196 +0,0 @@
import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig, VectaraFile } from '@langchain/community/vectorstores/vectara'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getFileFromStorage } from '../../../src'
/**
 * Node that uploads files to a Vectara corpus (Vectara performs its own
 * parsing/chunking server-side) and exposes the corpus as a retriever or
 * raw vector store.
 */
class VectaraUpload_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Vectara Upload File'
        this.name = 'vectaraUpload'
        this.version = 1.0
        this.type = 'Vectara'
        this.icon = 'vectara.png'
        this.category = 'Vector Stores'
        this.description = 'Upload files to Vectara'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['vectaraApi']
        }
        this.inputs = [
            {
                label: 'File',
                name: 'file',
                description:
                    'File to upload to Vectara. Supported file types: https://docs.vectara.com/docs/api-reference/indexing-apis/file-upload/file-upload-filetypes',
                type: 'file'
            },
            {
                label: 'Metadata Filter',
                name: 'filter',
                description:
                    'Filter to apply to Vectara metadata. Refer to the <a target="_blank" href="https://docs.flowiseai.com/vector-stores/vectara">documentation</a> on how to use Vectara filters with Flowise.',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Sentences Before',
                name: 'sentencesBefore',
                description: 'Number of sentences to fetch before the matched sentence. Defaults to 2.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Sentences After',
                name: 'sentencesAfter',
                description: 'Number of sentences to fetch after the matched sentence. Defaults to 2.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Lambda',
                name: 'lambda',
                description:
                    'Improves retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Defaults to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Vectara Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Vectara Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(VectaraStore)]
            }
        ]
    }

    /**
     * Resolve the credential, materialise the uploaded file(s) as Blobs
     * (either from flowise file storage or from inline data URIs), push
     * them to Vectara, and return the requested output.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
        const customerId = getCredentialParam('customerID', credentialData, nodeData)
        // A credential may reference several corpora as a comma-separated list.
        const corpusId = getCredentialParam('corpusID', credentialData, nodeData).split(',')
        const fileBase64 = nodeData.inputs?.file
        const vectaraMetadataFilter = nodeData.inputs?.filter as string
        const sentencesBefore = nodeData.inputs?.sentencesBefore as number
        const sentencesAfter = nodeData.inputs?.sentencesAfter as number
        const lambda = nodeData.inputs?.lambda as number
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseInt(topK, 10) : 4
        const vectaraArgs: VectaraLibArgs = {
            apiKey: apiKey,
            customerId: customerId,
            corpusId: corpusId,
            source: 'flowise'
        }
        // NOTE(review): these truthiness checks ignore an explicit 0 (e.g.
        // sentencesBefore = 0); confirm whether 0 should be honoured.
        const vectaraFilter: VectaraFilter = {}
        if (vectaraMetadataFilter) vectaraFilter.filter = vectaraMetadataFilter
        if (lambda) vectaraFilter.lambda = lambda
        const vectaraContextConfig: VectaraContextConfig = {}
        if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
        if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
        vectaraFilter.contextConfig = vectaraContextConfig
        let files: string[] = []
        const vectaraFiles: VectaraFile[] = []
        if (fileBase64.startsWith('FILE-STORAGE::')) {
            // Files stored by flowise: the input is a stored file name (or a
            // JSON array of names) prefixed with FILE-STORAGE::.
            const fileName = fileBase64.replace('FILE-STORAGE::', '')
            if (fileName.startsWith('[') && fileName.endsWith(']')) {
                files = JSON.parse(fileName)
            } else {
                files = [fileName]
            }
            const chatflowid = options.chatflowid
            for (const file of files) {
                const fileData = await getFileFromStorage(file, chatflowid)
                const blob = new Blob([fileData])
                // Fix: `file` here is already the plain stored file name, not a
                // data URI — getFileName() expects the ",filename:<name>" data-URI
                // marker and returned undefined for plain names.
                vectaraFiles.push({ blob: blob, fileName: file })
            }
        } else {
            // Inline uploads: data URI(s) carrying base64 content plus a
            // trailing ",filename:<name>" marker.
            if (fileBase64.startsWith('[') && fileBase64.endsWith(']')) {
                files = JSON.parse(fileBase64)
            } else {
                files = [fileBase64]
            }
            for (const file of files) {
                const splitDataURI = file.split(',')
                splitDataURI.pop()
                const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
                const blob = new Blob([bf])
                vectaraFiles.push({ blob: blob, fileName: getFileName(file) })
            }
        }
        const vectorStore = new VectaraStore(vectaraArgs)
        await vectorStore.addFiles(vectaraFiles)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k, vectaraFilter)
            return retriever
        } else if (output === 'vectorStore') {
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
/**
 * Extract the human-readable file name(s) from a Flowise file input string.
 *
 * Inline uploads are data URIs with a trailing ",filename:<name>" marker;
 * a JSON-array string yields the names joined with ", ".
 *
 * Robustness fix: when no "filename:" marker is present (e.g. a plain stored
 * file name from FILE-STORAGE), return the segment itself instead of the
 * previous behaviour of returning undefined.
 */
const getFileName = (fileBase64: string) => {
    // Pull the name out of a single file string.
    const extractName = (file: string): string => {
        const lastSegment = file.split(',').pop() ?? file
        const parts = lastSegment.split(':')
        // "filename:doc.pdf" -> "doc.pdf"; "doc.pdf" (no marker) -> itself.
        return parts.length > 1 ? parts[1] : lastSegment
    }
    if (fileBase64.startsWith('[') && fileBase64.endsWith(']')) {
        const files = JSON.parse(fileBase64)
        return files.map(extractName).join(', ')
    }
    return extractName(fileBase64)
}
module.exports = { nodeClass: VectaraUpload_VectorStores }

View File

@ -1,155 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig } from '@langchain/community/vectorstores/vectara'
import { Document } from '@langchain/core/documents'
import { flatten } from 'lodash'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
/**
 * Flowise node: upserts documents into a Vectara corpus and exposes the result
 * either as a retriever or as a raw vector store output.
 *
 * Marked DEPRECATING via `this.badge` — kept for backward compatibility.
 */
class VectaraUpsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]
    constructor() {
        // UI metadata and the node's input/output schema; all values are declarative config
        this.label = 'Vectara Upsert Document'
        this.name = 'vectaraUpsert'
        this.version = 1.0
        this.type = 'Vectara'
        this.icon = 'vectara.png'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to Vectara'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['vectaraApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Metadata Filter',
                name: 'filter',
                description:
                    'Filter to apply to Vectara metadata. Refer to the <a target="_blank" href="https://docs.flowiseai.com/vector-stores/vectara">documentation</a> on how to use Vectara filters with Flowise.',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Sentences Before',
                name: 'sentencesBefore',
                description: 'Number of sentences to fetch before the matched sentence. Defaults to 2.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Sentences After',
                name: 'sentencesAfter',
                description: 'Number of sentences to fetch after the matched sentence. Defaults to 2.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Lambda',
                name: 'lambda',
                description:
                    'Improves retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors.',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Defaults to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Vectara Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Vectara Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(VectaraStore)]
            }
        ]
    }
    /**
     * Upserts the connected documents into Vectara and returns either a
     * retriever or the vector store, depending on the selected output anchor.
     *
     * @param nodeData - resolved node inputs/outputs/credential reference
     * @param _ - unused (flow input string)
     * @param options - runtime context used to resolve the stored credential
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
        const customerId = getCredentialParam('customerID', credentialData, nodeData)
        // Credential may hold several corpus IDs as a comma-separated string
        const corpusId = getCredentialParam('corpusID', credentialData, nodeData).split(',')
        const docs = nodeData.inputs?.document as Document[]
        // Empty stub: no client-side embedder is wired in — presumably Vectara embeds
        // server-side, so only the shape of the argument is needed. TODO confirm.
        const embeddings = {} as Embeddings
        const vectaraMetadataFilter = nodeData.inputs?.filter as string
        const sentencesBefore = nodeData.inputs?.sentencesBefore as number
        const sentencesAfter = nodeData.inputs?.sentencesAfter as number
        const lambda = nodeData.inputs?.lambda as number
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseInt(topK, 10) : 4
        const vectaraArgs: VectaraLibArgs = {
            apiKey: apiKey,
            customerId: customerId,
            corpusId: corpusId,
            source: 'flowise'
        }
        // Only set optional filter fields when the user supplied them
        const vectaraFilter: VectaraFilter = {}
        if (vectaraMetadataFilter) vectaraFilter.filter = vectaraMetadataFilter
        if (lambda) vectaraFilter.lambda = lambda
        const vectaraContextConfig: VectaraContextConfig = {}
        if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
        if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
        vectaraFilter.contextConfig = vectaraContextConfig
        // Inputs may be nested arrays (list: true) — flatten, then drop entries without content
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }
        const vectorStore = await VectaraStore.fromDocuments(finalDocs, embeddings, vectaraArgs)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k, vectaraFilter)
            return retriever
        } else if (output === 'vectorStore') {
            // Attach k for downstream consumers that read it off the store directly
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
// Expose the node class under the `nodeClass` key for the Flowise node loader
module.exports = { nodeClass: VectaraUpsert_VectorStores }

View File

@ -32,7 +32,6 @@ class Weaviate_VectorStores implements INode {
this.description =
'Upsert embedded data and perform similarity or mmr search using Weaviate, a scalable open-source vector database'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,157 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import weaviate, { WeaviateClient, ApiKey } from 'weaviate-ts-client'
import { WeaviateLibArgs, WeaviateStore } from '@langchain/community/vectorstores/weaviate'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
/**
 * Flowise node: connects to an existing Weaviate index (documents already
 * upserted) and exposes it as a retriever or as a raw vector store output.
 *
 * Marked DEPRECATING via `this.badge` — kept for backward compatibility.
 */
class Weaviate_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]
    constructor() {
        // UI metadata and the node's input/output schema; all values are declarative config
        this.label = 'Weaviate Load Existing Index'
        this.name = 'weaviateExistingIndex'
        this.version = 1.0
        this.type = 'Weaviate'
        this.icon = 'weaviate.png'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Weaviate (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            description: 'Only needed when using Weaviate cloud hosted',
            optional: true,
            credentialNames: ['weaviateApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Weaviate Scheme',
                name: 'weaviateScheme',
                type: 'options',
                default: 'https',
                options: [
                    {
                        label: 'https',
                        name: 'https'
                    },
                    {
                        label: 'http',
                        name: 'http'
                    }
                ]
            },
            {
                label: 'Weaviate Host',
                name: 'weaviateHost',
                type: 'string',
                placeholder: 'localhost:8080'
            },
            {
                label: 'Weaviate Index',
                name: 'weaviateIndex',
                type: 'string',
                placeholder: 'Test'
            },
            {
                label: 'Weaviate Text Key',
                name: 'weaviateTextKey',
                type: 'string',
                placeholder: 'text',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Weaviate Metadata Keys',
                name: 'weaviateMetadataKeys',
                type: 'string',
                rows: 4,
                placeholder: `["foo"]`,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Weaviate Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Weaviate Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(WeaviateStore)]
            }
        ]
    }
    /**
     * Builds a Weaviate client from the node inputs, binds it to an existing
     * index, and returns either a retriever or the vector store.
     *
     * @param nodeData - resolved node inputs/outputs/credential reference
     * @param _ - unused (flow input string)
     * @param options - runtime context used to resolve the stored credential
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const weaviateScheme = nodeData.inputs?.weaviateScheme as string
        const weaviateHost = nodeData.inputs?.weaviateHost as string
        const weaviateIndex = nodeData.inputs?.weaviateIndex as string
        const weaviateTextKey = nodeData.inputs?.weaviateTextKey as string
        const weaviateMetadataKeys = nodeData.inputs?.weaviateMetadataKeys as string
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        // NOTE(review): parseFloat here vs parseInt in sibling nodes — works for whole
        // numbers but is inconsistent; confirm intended
        const k = topK ? parseFloat(topK) : 4
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const weaviateApiKey = getCredentialParam('weaviateApiKey', credentialData, nodeData)
        const clientConfig: any = {
            scheme: weaviateScheme,
            host: weaviateHost
        }
        // API key only applies to cloud-hosted Weaviate; local instances omit it
        if (weaviateApiKey) clientConfig.apiKey = new ApiKey(weaviateApiKey)
        const client: WeaviateClient = weaviate.client(clientConfig)
        const obj: WeaviateLibArgs = {
            client,
            indexName: weaviateIndex
        }
        if (weaviateTextKey) obj.textKey = weaviateTextKey
        // NOTE(review): stripping ALL whitespace before JSON.parse would corrupt keys
        // that contain spaces — presumably keys never do; confirm
        if (weaviateMetadataKeys) obj.metadataKeys = JSON.parse(weaviateMetadataKeys.replace(/\s/g, ''))
        const vectorStore = await WeaviateStore.fromExistingIndex(embeddings, obj)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Attach k for downstream consumers that read it off the store directly
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
// Expose the node class under the `nodeClass` key for the Flowise node loader
module.exports = { nodeClass: Weaviate_Existing_VectorStores }

View File

@ -1,174 +0,0 @@
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { WeaviateLibArgs, WeaviateStore } from '@langchain/community/vectorstores/weaviate'
import weaviate, { WeaviateClient, ApiKey } from 'weaviate-ts-client'
import { flatten } from 'lodash'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Flowise node: embeds and upserts documents into a Weaviate index and exposes
 * the result as a retriever or as a raw vector store output.
 *
 * Marked DEPRECATING via `this.badge` — kept for backward compatibility.
 */
class WeaviateUpsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]
    constructor() {
        // UI metadata and the node's input/output schema; all values are declarative config
        this.label = 'Weaviate Upsert Document'
        this.name = 'weaviateUpsert'
        this.version = 1.0
        this.type = 'Weaviate'
        this.icon = 'weaviate.png'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to Weaviate'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            description: 'Only needed when using Weaviate cloud hosted',
            optional: true,
            credentialNames: ['weaviateApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Weaviate Scheme',
                name: 'weaviateScheme',
                type: 'options',
                default: 'https',
                options: [
                    {
                        label: 'https',
                        name: 'https'
                    },
                    {
                        label: 'http',
                        name: 'http'
                    }
                ]
            },
            {
                label: 'Weaviate Host',
                name: 'weaviateHost',
                type: 'string',
                placeholder: 'localhost:8080'
            },
            {
                label: 'Weaviate Index',
                name: 'weaviateIndex',
                type: 'string',
                placeholder: 'Test'
            },
            {
                label: 'Weaviate Text Key',
                name: 'weaviateTextKey',
                type: 'string',
                placeholder: 'text',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Weaviate Metadata Keys',
                name: 'weaviateMetadataKeys',
                type: 'string',
                rows: 4,
                placeholder: `["foo"]`,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Weaviate Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Weaviate Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(WeaviateStore)]
            }
        ]
    }
    /**
     * Upserts the connected documents into Weaviate (embedding them with the
     * connected Embeddings) and returns either a retriever or the vector store.
     *
     * @param nodeData - resolved node inputs/outputs/credential reference
     * @param _ - unused (flow input string)
     * @param options - runtime context used to resolve the stored credential
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const weaviateScheme = nodeData.inputs?.weaviateScheme as string
        const weaviateHost = nodeData.inputs?.weaviateHost as string
        const weaviateIndex = nodeData.inputs?.weaviateIndex as string
        const weaviateTextKey = nodeData.inputs?.weaviateTextKey as string
        const weaviateMetadataKeys = nodeData.inputs?.weaviateMetadataKeys as string
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const weaviateApiKey = getCredentialParam('weaviateApiKey', credentialData, nodeData)
        const clientConfig: any = {
            scheme: weaviateScheme,
            host: weaviateHost
        }
        // API key only applies to cloud-hosted Weaviate; local instances omit it
        if (weaviateApiKey) clientConfig.apiKey = new ApiKey(weaviateApiKey)
        const client: WeaviateClient = weaviate.client(clientConfig)
        // Inputs may be nested arrays (list: true) — flatten, then drop entries without content
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }
        const obj: WeaviateLibArgs = {
            client,
            indexName: weaviateIndex
        }
        if (weaviateTextKey) obj.textKey = weaviateTextKey
        // NOTE(review): stripping ALL whitespace before JSON.parse would corrupt keys
        // that contain spaces — presumably keys never do; confirm
        if (weaviateMetadataKeys) obj.metadataKeys = JSON.parse(weaviateMetadataKeys.replace(/\s/g, ''))
        const vectorStore = await WeaviateStore.fromDocuments(finalDocs, embeddings, obj)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Attach k for downstream consumers that read it off the store directly
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
// Expose the node class under the `nodeClass` key for the Flowise node loader
module.exports = { nodeClass: WeaviateUpsert_VectorStores }

View File

@ -31,7 +31,6 @@ class Zep_VectorStores implements INode {
this.description =
'Upsert embedded data and perform similarity or mmr search upon query using Zep, a fast and scalable building block for LLM apps'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -1,240 +0,0 @@
import { IDocument, ZepClient } from '@getzep/zep-js'
import { ZepVectorStore, IZepConfig } from '@langchain/community/vectorstores/zep'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Flowise node: connects to an existing collection in an open-source Zep
 * instance (documents already upserted) and exposes it as a retriever or as a
 * raw vector store output.
 *
 * Marked DEPRECATING via `this.badge` — kept for backward compatibility.
 */
class Zep_Existing_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]
    constructor() {
        // UI metadata and the node's input/output schema; all values are declarative config
        this.label = 'Zep Load Existing Index - Open Source'
        this.name = 'zepExistingIndex'
        this.version = 1.0
        this.type = 'Zep'
        this.icon = 'zep.svg'
        this.category = 'Vector Stores'
        this.description = 'Load existing index from Zep (i.e: Document has been upserted)'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            optional: true,
            description: 'Configure JWT authentication on your Zep instance (Optional)',
            credentialNames: ['zepMemoryApi']
        }
        this.inputs = [
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Base URL',
                name: 'baseURL',
                type: 'string',
                default: 'http://127.0.0.1:8000'
            },
            {
                label: 'Zep Collection',
                name: 'zepCollection',
                type: 'string',
                placeholder: 'my-first-collection'
            },
            {
                label: 'Zep Metadata Filter',
                name: 'zepMetadataFilter',
                type: 'json',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Embedding Dimension',
                name: 'dimension',
                type: 'number',
                default: 1536,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Zep Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Zep Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(ZepVectorStore)]
            }
        ]
    }
    /**
     * Binds to the named Zep collection (via the ZepExistingVS wrapper defined
     * below) and returns either a retriever or the vector store.
     *
     * @param nodeData - resolved node inputs/outputs/credential reference
     * @param _ - unused (flow input string)
     * @param options - runtime context used to resolve the stored credential
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const baseURL = nodeData.inputs?.baseURL as string
        const zepCollection = nodeData.inputs?.zepCollection as string
        const zepMetadataFilter = nodeData.inputs?.zepMetadataFilter
        const dimension = nodeData.inputs?.dimension as number
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
        const zepConfig: IZepConfig & Partial<ZepFilter> = {
            apiUrl: baseURL,
            collectionName: zepCollection,
            embeddingDimensions: dimension,
            // Embeddings are supplied by the connected Embeddings node, not by Zep itself
            isAutoEmbedded: false
        }
        if (apiKey) zepConfig.apiKey = apiKey
        if (zepMetadataFilter) {
            // The 'json' input may arrive already parsed or as a raw string
            const metadatafilter = typeof zepMetadataFilter === 'object' ? zepMetadataFilter : JSON.parse(zepMetadataFilter)
            zepConfig.filter = metadatafilter
        }
        const vectorStore = await ZepExistingVS.fromExistingIndex(embeddings, zepConfig)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Attach k (and the filter, if any) for downstream consumers reading the store directly
            ;(vectorStore as any).k = k
            if (zepMetadataFilter) {
                ;(vectorStore as any).filter = zepConfig.filter
            }
            return vectorStore
        }
        return vectorStore
    }
}
// Extra configuration carried alongside IZepConfig: a metadata filter applied at search time
interface ZepFilter {
    filter: Record<string, any>
}
/**
 * Converts Zep search results into LangChain [Document, score] tuples.
 * A missing/falsy score is normalised to 0.
 */
function zepDocsToDocumentsAndScore(results: IDocument[]): [Document, number][] {
    const pairs: [Document, number][] = []
    for (const result of results) {
        const doc = new Document({
            pageContent: result.content,
            metadata: result.metadata
        })
        pairs.push([doc, result.score ? result.score : 0])
    }
    return pairs
}
/**
 * Normalises a metadata-filter input to a plain record.
 * Non-null objects pass through unchanged; `undefined` yields `undefined`;
 * anything else (e.g. a string) is rejected with a console warning.
 */
function assignMetadata(value: string | Record<string, unknown> | object | undefined): Record<string, unknown> | undefined {
    const isUsableObject = value !== null && typeof value === 'object'
    if (isUsableObject) {
        return value as Record<string, unknown>
    }
    // Warn only for present-but-invalid values; absent filters are legitimate
    if (value !== undefined) {
        console.warn('Metadata filters must be an object, Record, or undefined.')
    }
    return undefined
}
/**
 * ZepVectorStore subclass that (1) defers collection initialisation until
 * search time and (2) translates a flat key/value filter into Zep's
 * jsonpath "where/and" filter syntax before searching.
 */
class ZepExistingVS extends ZepVectorStore {
    filter?: Record<string, any>
    args?: IZepConfig & Partial<ZepFilter>
    constructor(embeddings: Embeddings, args: IZepConfig & Partial<ZepFilter>) {
        super(embeddings, args)
        // Keep the filter and full args around: the collection is (re)initialised lazily per search
        this.filter = args.filter
        this.args = args
    }
    // (sic: "initalize") Connects the Zep client and binds this.collection,
    // creating the collection on a NotFoundError; other errors propagate.
    async initalizeCollection(args: IZepConfig & Partial<ZepFilter>) {
        this.client = await ZepClient.init(args.apiUrl, args.apiKey)
        try {
            this.collection = await this.client.document.getCollection(args.collectionName)
        } catch (err) {
            if (err instanceof Error) {
                if (err.name === 'NotFoundError') {
                    await this.createNewCollection(args)
                } else {
                    throw err
                }
            }
        }
    }
    // Creates the collection; embeddingDimensions is mandatory because Zep is
    // not auto-embedding here (isAutoEmbedded: false).
    async createNewCollection(args: IZepConfig & Partial<ZepFilter>) {
        if (!args.embeddingDimensions) {
            throw new Error(
                `Collection ${args.collectionName} not found. You can create a new Collection by providing embeddingDimensions.`
            )
        }
        this.collection = await this.client.document.addCollection({
            name: args.collectionName,
            description: args.description,
            metadata: args.metadata,
            embeddingDimensions: args.embeddingDimensions,
            isAutoEmbedded: false
        })
    }
    /**
     * Vector similarity search with the stored (or per-call) filter rewritten
     * into Zep jsonpath clauses ANDed together.
     *
     * @param query - query embedding
     * @param k - number of results
     * @param filter - per-call filter; mutually exclusive with this.filter
     * @throws Error when both a per-call filter and this.filter are set
     */
    async similaritySearchVectorWithScore(
        query: number[],
        k: number,
        filter?: Record<string, unknown> | undefined
    ): Promise<[Document, number][]> {
        if (filter && this.filter) {
            throw new Error('cannot provide both `filter` and `this.filter`')
        }
        const _filters = filter ?? this.filter
        // Each key/value pair becomes one jsonpath clause; string values get quoted
        const ANDFilters = []
        for (const filterKey in _filters) {
            let filterVal = _filters[filterKey]
            if (typeof filterVal === 'string') filterVal = `"${filterVal}"`
            ANDFilters.push({ jsonpath: `$[*] ? (@.${filterKey} == ${filterVal})` })
        }
        const newfilter = {
            where: { and: ANDFilters }
        }
        // Lazy init on every search — presumably cheap enough; TODO confirm no caching is needed
        await this.initalizeCollection(this.args!).catch((err) => {
            console.error('Error initializing collection:', err)
            throw err
        })
        const results = await this.collection.search(
            {
                embedding: new Float32Array(query),
                metadata: assignMetadata(newfilter)
            },
            k
        )
        return zepDocsToDocumentsAndScore(results)
    }
    // Factory mirroring ZepVectorStore.fromExistingIndex but returning this subclass;
    // no network call happens here (connection is deferred to search time)
    static async fromExistingIndex(embeddings: Embeddings, dbConfig: IZepConfig & Partial<ZepFilter>): Promise<ZepVectorStore> {
        const instance = new this(embeddings, dbConfig)
        return instance
    }
}
// Expose the node class under the `nodeClass` key for the Flowise node loader
module.exports = { nodeClass: Zep_Existing_VectorStores }

View File

@ -1,137 +0,0 @@
import { flatten } from 'lodash'
import { ZepVectorStore, IZepConfig } from '@langchain/community/vectorstores/zep'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
/**
 * Flowise node: embeds and upserts documents into an open-source Zep
 * collection and exposes the result as a retriever or as a raw vector store.
 *
 * Marked DEPRECATING via `this.badge` — kept for backward compatibility.
 */
class Zep_Upsert_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]
    constructor() {
        // UI metadata and the node's input/output schema; all values are declarative config
        this.label = 'Zep Upsert Document - Open Source'
        this.name = 'zepUpsert'
        this.version = 1.0
        this.type = 'Zep'
        this.icon = 'zep.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert documents to Zep'
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'DEPRECATING'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            optional: true,
            description: 'Configure JWT authentication on your Zep instance (Optional)',
            credentialNames: ['zepMemoryApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Base URL',
                name: 'baseURL',
                type: 'string',
                default: 'http://127.0.0.1:8000'
            },
            {
                label: 'Zep Collection',
                name: 'zepCollection',
                type: 'string',
                placeholder: 'my-first-collection'
            },
            {
                label: 'Embedding Dimension',
                name: 'dimension',
                type: 'number',
                default: 1536,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Zep Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Zep Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(ZepVectorStore)]
            }
        ]
    }
    /**
     * Upserts the connected documents into the Zep collection (embedding them
     * with the connected Embeddings) and returns either a retriever or the
     * vector store.
     *
     * @param nodeData - resolved node inputs/outputs/credential reference
     * @param _ - unused (flow input string)
     * @param options - runtime context used to resolve the stored credential
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const baseURL = nodeData.inputs?.baseURL as string
        const zepCollection = nodeData.inputs?.zepCollection as string
        const dimension = (nodeData.inputs?.dimension as number) ?? 1536
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const output = nodeData.outputs?.output as string
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
        // Inputs may be nested arrays (list: true) — flatten, then drop entries without content
        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }
        const zepConfig: IZepConfig = {
            apiUrl: baseURL,
            collectionName: zepCollection,
            embeddingDimensions: dimension,
            // Embeddings are supplied by the connected Embeddings node, not by Zep itself
            isAutoEmbedded: false
        }
        if (apiKey) zepConfig.apiKey = apiKey
        const vectorStore = await ZepVectorStore.fromDocuments(finalDocs, embeddings, zepConfig)
        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            // Attach k for downstream consumers that read it off the store directly
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}
// Expose the node class under the `nodeClass` key for the Flowise node loader
module.exports = { nodeClass: Zep_Upsert_VectorStores }

View File

@ -32,7 +32,6 @@ class Zep_CloudVectorStores implements INode {
this.description =
'Upsert embedded data and perform similarity or mmr search upon query using Zep, a fast and scalable building block for LLM apps'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
label: 'Connect Credential',
name: 'credential',

View File

@ -58,7 +58,9 @@ export class ConsoleCallbackHandler extends BaseTracer {
constructor(logger: Logger) {
super()
this.logger = logger
logger.level = getEnvironmentVariable('LOG_LEVEL') ?? 'info'
if (getEnvironmentVariable('DEBUG') === 'true') {
logger.level = getEnvironmentVariable('LOG_LEVEL') ?? 'info'
}
}
getParents(run: Run) {

View File

@ -26,7 +26,7 @@ PORT=3000
# DEBUG=true
# LOG_PATH=/your_log_path/.flowise/logs
# LOG_LEVEL=debug (error | warn | info | verbose | debug)
# LOG_LEVEL=info (error | warn | info | verbose | debug)
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash

View File

@ -1,5 +1,7 @@
{
"description": "Customer support team consisting of Support Representative and Quality Assurance Specialist to handle support tickets",
"framework": ["Langchain"],
"usecases": ["Customer Support"],
"nodes": [
{
"id": "supervisor_0",

View File

@ -1,5 +1,7 @@
{
"description": "Research leads and create personalized email drafts for sales team",
"framework": ["Langchain"],
"usecases": ["Leads"],
"nodes": [
{
"id": "supervisor_0",

View File

@ -1,5 +1,7 @@
{
"description": "A team of portfolio manager, financial analyst, and risk manager working together to optimize an investment portfolio.",
"framework": ["Langchain"],
"usecases": ["Finance & Accounting"],
"nodes": [
{
"id": "supervisor_0",

View File

@ -1,5 +1,7 @@
{
"description": "Software engineering team working together to build a feature, solve a problem, or complete a task.",
"framework": ["Langchain"],
"usecases": ["Engineering"],
"nodes": [
{
"id": "supervisor_0",

View File

@ -1,5 +1,7 @@
{
"description": "Text to SQL query process using team of 3 agents: SQL Expert, SQL Reviewer, and SQL Executor",
"framework": ["Langchain"],
"usecases": ["SQL"],
"nodes": [
{
"id": "supervisor_0",

View File

@ -1,11 +1,11 @@
{
"description": "Given API docs, agent automatically decide which API to call, generating url and body request from conversation",
"categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,Conversational Agent,Langchain",
"framework": "Langchain",
"framework": ["Langchain"],
"usecases": ["Interacting with API"],
"nodes": [
{
"width": 300,
"height": 459,
"height": 460,
"id": "getApiChain_0",
"position": {
"x": 1222.6923202234623,
@ -94,7 +94,7 @@
},
{
"width": 300,
"height": 602,
"height": 603,
"id": "chainTool_0",
"position": {
"x": 1600.1485877701232,
@ -145,7 +145,7 @@
"inputs": {
"name": "weather-qa",
"description": "useful for when you need to ask question about weather",
"returnDirect": "",
"returnDirect": false,
"baseChain": "{{getApiChain_0.data.instance}}"
},
"outputAnchors": [
@ -168,7 +168,7 @@
},
{
"width": 300,
"height": 376,
"height": 253,
"id": "bufferMemory_0",
"position": {
"x": 1642.0644080121785,
@ -229,7 +229,7 @@
},
{
"width": 300,
"height": 602,
"height": 603,
"id": "chainTool_1",
"position": {
"x": 1284.7746596034926,
@ -303,7 +303,7 @@
},
{
"width": 300,
"height": 459,
"height": 460,
"id": "postApiChain_0",
"position": {
"x": 933.3631140153886,
@ -367,7 +367,7 @@
],
"inputs": {
"model": "{{chatOpenAI_2.data.instance}}",
"apiDocs": "API documentation:\nEndpoint: https://eog776prcv6dg0j.m.pipedream.net\n\nThis API is for sending Discord message\n\nQuery body table:\nmessage | string | Message to send | required\n\nResponse schema (string):\nresult | string",
"apiDocs": "API documentation:\nEndpoint: https://some-discord-webhook.com\n\nThis API is for sending Discord message\n\nQuery body table:\nmessage | string | Message to send | required\n\nResponse schema (string):\nresult | string",
"headers": "",
"urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:",
"ansPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:"
@ -392,7 +392,7 @@
},
{
"width": 300,
"height": 574,
"height": 670,
"id": "chatOpenAI_2",
"position": {
"x": 572.8941615312035,
@ -402,7 +402,7 @@
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"version": 6.0,
"version": 6,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -564,17 +564,17 @@
},
{
"width": 300,
"height": 574,
"height": 670,
"id": "chatOpenAI_1",
"position": {
"x": 828.7788305309582,
"y": 302.8996144964516
"x": 859.9597222599807,
"y": 163.26344718821986
},
"type": "customNode",
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 6.0,
"version": 6,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -705,7 +705,7 @@
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"temperature": "0.6",
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
@ -729,14 +729,14 @@
},
"selected": false,
"positionAbsolute": {
"x": 828.7788305309582,
"y": 302.8996144964516
"x": 859.9597222599807,
"y": 163.26344718821986
},
"dragging": false
},
{
"width": 300,
"height": 574,
"height": 670,
"id": "chatOpenAI_3",
"position": {
"x": 1148.338912314111,
@ -746,7 +746,7 @@
"data": {
"id": "chatOpenAI_3",
"label": "ChatOpenAI",
"version": 6.0,
"version": 6,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -907,33 +907,31 @@
"dragging": false
},
{
"width": 300,
"height": 383,
"id": "conversationalAgent_0",
"id": "toolAgent_0",
"position": {
"x": 2090.570467632979,
"y": 969.5131357270544
"x": 2087.462952706838,
"y": 974.6001334100872
},
"type": "customNode",
"data": {
"id": "conversationalAgent_0",
"label": "Conversational Agent",
"version": 3,
"name": "conversationalAgent",
"id": "toolAgent_0",
"label": "Tool Agent",
"version": 1,
"name": "toolAgent",
"type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
"category": "Agents",
"description": "Conversational agent for a chat model. It will utilize chat specific prompts",
"description": "Agent that uses Function Calling to pick the tools and args to call",
"inputParams": [
{
"label": "System Message",
"name": "systemMessage",
"type": "string",
"default": "You are a helpful AI assistant.",
"rows": 4,
"default": "Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.",
"optional": true,
"additionalParams": true,
"id": "conversationalAgent_0-input-systemMessage-string"
"id": "toolAgent_0-input-systemMessage-string"
},
{
"label": "Max Iterations",
@ -941,28 +939,29 @@
"type": "number",
"optional": true,
"additionalParams": true,
"id": "conversationalAgent_0-input-maxIterations-number"
"id": "toolAgent_0-input-maxIterations-number"
}
],
"inputAnchors": [
{
"label": "Allowed Tools",
"label": "Tools",
"name": "tools",
"type": "Tool",
"list": true,
"id": "conversationalAgent_0-input-tools-Tool"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"id": "conversationalAgent_0-input-model-BaseChatModel"
"id": "toolAgent_0-input-tools-Tool"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseChatMemory",
"id": "conversationalAgent_0-input-memory-BaseChatMemory"
"id": "toolAgent_0-input-memory-BaseChatMemory"
},
{
"label": "Tool Calling Chat Model",
"name": "model",
"type": "BaseChatModel",
"description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat",
"id": "toolAgent_0-input-model-BaseChatModel"
},
{
"label": "Input Moderation",
@ -971,31 +970,88 @@
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalAgent_0-input-inputModeration-Moderation"
"id": "toolAgent_0-input-inputModeration-Moderation"
}
],
"inputs": {
"inputModeration": "",
"tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"],
"model": "{{chatOpenAI_3.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}",
"systemMessage": "Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist."
"model": "{{chatOpenAI_3.data.instance}}",
"systemMessage": "You are a helpful AI assistant.",
"inputModeration": "",
"maxIterations": ""
},
"outputAnchors": [
{
"id": "conversationalAgent_0-output-conversationalAgent-AgentExecutor|BaseChain|Runnable",
"name": "conversationalAgent",
"id": "toolAgent_0-output-toolAgent-AgentExecutor|BaseChain|Runnable",
"name": "toolAgent",
"label": "AgentExecutor",
"description": "Agent that uses Function Calling to pick the tools and args to call",
"type": "AgentExecutor | BaseChain | Runnable"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 435,
"selected": false,
"positionAbsolute": {
"x": 2090.570467632979,
"y": 969.5131357270544
"x": 2087.462952706838,
"y": 974.6001334100872
},
"dragging": false
},
{
"id": "stickyNote_0",
"position": {
"x": 2081.8371244608006,
"y": 595.924073574161
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "Using agent, we give it 2 tools that is each attached to a GET/POST API Chain.\n\nThe goal is to have the agent to decide when to use which tool. \n\nWhen the tool is being used, API Chain's task is to figure out the correct URL and params to make the HTTP call.\n\nHowever, it is recommended to use OpenAPI YML to give a more structured input to LLM, for better quality output.\n\nExample question:\nSend me the weather of SF today to my discord"
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 364,
"selected": false,
"positionAbsolute": {
"x": 2081.8371244608006,
"y": 595.924073574161
},
"dragging": false
}
@ -1048,46 +1104,34 @@
{
"source": "chainTool_0",
"sourceHandle": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
"target": "conversationalAgent_0",
"targetHandle": "conversationalAgent_0-input-tools-Tool",
"target": "toolAgent_0",
"targetHandle": "toolAgent_0-input-tools-Tool",
"type": "buttonedge",
"id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool",
"data": {
"label": ""
}
"id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-toolAgent_0-toolAgent_0-input-tools-Tool"
},
{
"source": "chainTool_1",
"sourceHandle": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
"target": "conversationalAgent_0",
"targetHandle": "conversationalAgent_0-input-tools-Tool",
"target": "toolAgent_0",
"targetHandle": "toolAgent_0-input-tools-Tool",
"type": "buttonedge",
"id": "chainTool_1-chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_3",
"sourceHandle": "chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
"target": "conversationalAgent_0",
"targetHandle": "conversationalAgent_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatOpenAI_3-chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalAgent_0-conversationalAgent_0-input-model-BaseChatModel",
"data": {
"label": ""
}
"id": "chainTool_1-chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-toolAgent_0-toolAgent_0-input-tools-Tool"
},
{
"source": "bufferMemory_0",
"sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"target": "conversationalAgent_0",
"targetHandle": "conversationalAgent_0-input-memory-BaseChatMemory",
"target": "toolAgent_0",
"targetHandle": "toolAgent_0-input-memory-BaseChatMemory",
"type": "buttonedge",
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationalAgent_0-conversationalAgent_0-input-memory-BaseChatMemory",
"data": {
"label": ""
}
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-toolAgent_0-toolAgent_0-input-memory-BaseChatMemory"
},
{
"source": "chatOpenAI_3",
"sourceHandle": "chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
"target": "toolAgent_0",
"targetHandle": "toolAgent_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatOpenAI_3-chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-toolAgent_0-toolAgent_0-input-model-BaseChatModel"
}
]
}

View File

@ -1,16 +1,15 @@
{
"description": "Return response as a JSON structure as specified by a Zod schema",
"categories": "AdvancedStructuredOutputParser,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"badge": "NEW",
"framework": ["Langchain"],
"usecases": ["Extraction"],
"nodes": [
{
"width": 300,
"height": 508,
"id": "llmChain_0",
"position": {
"x": 1229.1699649849293,
"y": 245.55173505632646
"x": 1224.5123724068537,
"y": 203.63340185364572
},
"type": "customNode",
"data": {
@ -97,18 +96,19 @@
"selected": false
},
"positionAbsolute": {
"x": 1229.1699649849293,
"y": 245.55173505632646
"x": 1224.5123724068537,
"y": 203.63340185364572
},
"selected": false
"selected": false,
"dragging": false
},
{
"width": 300,
"height": 690,
"id": "chatPromptTemplate_0",
"position": {
"x": 493.26582927222483,
"y": -156.20470841335592
"x": 62.32815086916713,
"y": -173.7208464588945
},
"type": "customNode",
"data": {
@ -166,24 +166,24 @@
},
"selected": false,
"positionAbsolute": {
"x": 493.26582927222483,
"y": -156.20470841335592
"x": 62.32815086916713,
"y": -173.7208464588945
},
"dragging": false
},
{
"width": 300,
"height": 576,
"height": 670,
"id": "chatOpenAI_0",
"position": {
"x": 860.555928011636,
"y": -355.71028569475095
"x": 851.2457594432603,
"y": -352.1518756201128
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6.0,
"version": 6,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -320,7 +320,7 @@
],
"inputs": {
"cache": "",
"modelName": "",
"modelName": "gpt-4-turbo",
"temperature": "0",
"maxTokens": "",
"topP": "",
@ -345,8 +345,8 @@
},
"selected": false,
"positionAbsolute": {
"x": 860.555928011636,
"y": -355.71028569475095
"x": 851.2457594432603,
"y": -352.1518756201128
},
"dragging": false
},
@ -355,8 +355,8 @@
"height": 454,
"id": "advancedStructuredOutputParser_0",
"position": {
"x": 489.3637511211284,
"y": 580.0628053662244
"x": 449.77421420748544,
"y": -72.00015556436546
},
"type": "customNode",
"data": {
@ -389,8 +389,8 @@
],
"inputAnchors": [],
"inputs": {
"autofixParser": "",
"exampleJson": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})"
"autofixParser": true,
"exampleJson": "z.array(z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n}))"
},
"outputAnchors": [
{
@ -406,9 +406,62 @@
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 489.3637511211284,
"y": 580.0628053662244
"x": 449.77421420748544,
"y": -72.00015556436546
}
},
{
"id": "stickyNote_0",
"position": {
"x": 1224.8602820360084,
"y": 45.252502534529725
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "This template is designed to give output in JSON format defined in the Output Parser.\n\nExample question:\nTop 5 movies of all time"
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 123,
"selected": false,
"positionAbsolute": {
"x": 1224.8602820360084,
"y": 45.252502534529725
},
"dragging": false
}
],
"edges": [

View File

@ -1,11 +1,11 @@
{
"description": "Output antonym of given user input using few-shot prompt template built with examples",
"categories": "Few Shot Prompt,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"framework": ["Langchain"],
"usecases": ["Basic"],
"nodes": [
{
"width": 300,
"height": 955,
"height": 956,
"id": "fewShotPromptTemplate_1",
"position": {
"x": 886.3229032369354,
@ -107,7 +107,7 @@
},
{
"width": 300,
"height": 475,
"height": 513,
"id": "promptTemplate_0",
"position": {
"x": 540.0140796251119,
@ -167,179 +167,7 @@
},
{
"width": 300,
"height": 574,
"id": "chatOpenAI_0",
"position": {
"x": 1226.7977900193628,
"y": -22.01100655894436
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6.0,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "gpt-3.5-turbo",
"id": "chatOpenAI_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"default": 0.9,
"optional": true,
"id": "chatOpenAI_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-basepath-string"
},
{
"label": "BaseOptions",
"name": "baseOptions",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1226.7977900193628,
"y": -22.01100655894436
},
"dragging": false
},
{
"width": 300,
"height": 456,
"height": 508,
"id": "llmChain_0",
"position": {
"x": 1609.3428158423485,
@ -435,6 +263,239 @@
"y": 409.3763727612179
},
"dragging": false
},
{
"id": "chatOpenAI_0",
"position": {
"x": 1220.4459070421062,
"y": -80.75004891987845
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "gpt-3.5-turbo",
"id": "chatOpenAI_0-input-modelName-asyncOptions"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-basepath-string"
},
{
"label": "BaseOptions",
"name": "baseOptions",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low"
},
"outputAnchors": [
{
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 670,
"selected": false,
"positionAbsolute": {
"x": 1220.4459070421062,
"y": -80.75004891987845
},
"dragging": false
},
{
"id": "stickyNote_0",
"position": {
"x": 1607.723380325684,
"y": 245.15558433515412
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "Using few shot examples, we let LLM learns from the examples.\n\nThis template showcase how we can let LLM gives output as an antonym for given input"
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 143,
"selected": false,
"positionAbsolute": {
"x": 1607.723380325684,
"y": 245.15558433515412
},
"dragging": false
}
],
"edges": [
@ -449,17 +510,6 @@
"label": ""
}
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-llmChain_0-llmChain_0-input-model-BaseLanguageModel",
"data": {
"label": ""
}
},
{
"source": "fewShotPromptTemplate_1",
"sourceHandle": "fewShotPromptTemplate_1-output-fewShotPromptTemplate-FewShotPromptTemplate|BaseStringPromptTemplate|BasePromptTemplate",
@ -470,6 +520,14 @@
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel"
}
]
}

View File

@ -1,15 +1,15 @@
{
"description": "Use AutoGPT - Autonomous agent with chain of thoughts for self-guided task completion",
"categories": "AutoGPT,SERP Tool,File Read/Write,ChatOpenAI,Pinecone,Langchain",
"framework": "Langchain",
"description": "AutoGPT - Autonomous agent with chain of thoughts for self-guided task completion",
"framework": ["Langchain"],
"usecases": ["Reflective Agent"],
"nodes": [
{
"width": 300,
"height": 627,
"height": 679,
"id": "autoGPT_0",
"position": {
"x": 1627.8124366169843,
"y": 129.76619452400155
"x": 1566.5228556278,
"y": 48.800017192230115
},
"type": "customNode",
"data": {
@ -79,7 +79,7 @@
],
"inputs": {
"inputModeration": "",
"tools": ["{{readFile_0.data.instance}}", "{{writeFile_1.data.instance}}", "{{serpAPI_0.data.instance}}"],
"tools": ["{{serpAPI_0.data.instance}}"],
"model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{pinecone_0.data.instance}}",
"aiName": "",
@ -99,118 +99,18 @@
},
"selected": false,
"positionAbsolute": {
"x": 1627.8124366169843,
"y": 129.76619452400155
"x": 1566.5228556278,
"y": 48.800017192230115
},
"dragging": false
},
{
"width": 300,
"height": 278,
"id": "writeFile_1",
"position": {
"x": 539.4976647298655,
"y": 36.45930212160803
},
"type": "customNode",
"data": {
"id": "writeFile_1",
"label": "Write File",
"version": 1,
"name": "writeFile",
"type": "WriteFile",
"baseClasses": ["WriteFile", "Tool", "StructuredTool", "BaseLangChain"],
"category": "Tools",
"description": "Write file to disk",
"inputParams": [
{
"label": "Base Path",
"name": "basePath",
"placeholder": "C:\\Users\\User\\Desktop",
"type": "string",
"optional": true,
"id": "writeFile_1-input-basePath-string"
}
],
"inputAnchors": [],
"inputs": {
"basePath": ""
},
"outputAnchors": [
{
"id": "writeFile_1-output-writeFile-WriteFile|Tool|StructuredTool|BaseLangChain",
"name": "writeFile",
"label": "WriteFile",
"type": "WriteFile | Tool | StructuredTool | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"positionAbsolute": {
"x": 539.4976647298655,
"y": 36.45930212160803
},
"selected": false,
"dragging": false
},
{
"width": 300,
"height": 278,
"id": "readFile_0",
"position": {
"x": 881.2568465391292,
"y": -112.9631005153393
},
"type": "customNode",
"data": {
"id": "readFile_0",
"label": "Read File",
"version": 1,
"name": "readFile",
"type": "ReadFile",
"baseClasses": ["ReadFile", "Tool", "StructuredTool", "BaseLangChain"],
"category": "Tools",
"description": "Read file from disk",
"inputParams": [
{
"label": "Base Path",
"name": "basePath",
"placeholder": "C:\\Users\\User\\Desktop",
"type": "string",
"optional": true,
"id": "readFile_0-input-basePath-string"
}
],
"inputAnchors": [],
"inputs": {
"basePath": ""
},
"outputAnchors": [
{
"id": "readFile_0-output-readFile-ReadFile|Tool|StructuredTool|BaseLangChain",
"name": "readFile",
"label": "ReadFile",
"type": "ReadFile | Tool | StructuredTool | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 881.2568465391292,
"y": -112.9631005153393
},
"dragging": false
},
{
"width": 300,
"height": 277,
"height": 276,
"id": "serpAPI_0",
"position": {
"x": 1247.066832724479,
"y": -193.77467220135756
"x": 1207.9685973743674,
"y": -216.77363417201138
},
"type": "customNode",
"data": {
@ -246,24 +146,24 @@
},
"selected": false,
"positionAbsolute": {
"x": 1247.066832724479,
"y": -193.77467220135756
"x": 1207.9685973743674,
"y": -216.77363417201138
},
"dragging": false
},
{
"width": 300,
"height": 574,
"height": 670,
"id": "chatOpenAI_0",
"position": {
"x": 176.69787776192283,
"y": -116.3808686218022
"x": 861.5955028972123,
"y": -322.72984118549857
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6.0,
"version": 6,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -418,24 +318,24 @@
},
"selected": false,
"positionAbsolute": {
"x": 176.69787776192283,
"y": -116.3808686218022
"x": 861.5955028972123,
"y": -322.72984118549857
},
"dragging": false
},
{
"width": 300,
"height": 329,
"height": 424,
"id": "openAIEmbeddings_0",
"position": {
"x": 606.7317612889267,
"y": 439.5269912996025
"x": 116.62153412789377,
"y": 52.465581131402246
},
"type": "customNode",
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 3,
"version": 4,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -455,7 +355,7 @@
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "text-embedding-ada-002",
"id": "openAIEmbeddings_0-input-modelName-options"
"id": "openAIEmbeddings_0-input-modelName-asyncOptions"
},
{
"label": "Strip New Lines",
@ -488,21 +388,31 @@
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-basepath-string"
},
{
"label": "Dimensions",
"name": "dimensions",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-dimensions-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "text-embedding-ada-002",
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": "",
"modelName": "text-embedding-ada-002"
"dimensions": ""
},
"outputAnchors": [
{
"id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
"name": "openAIEmbeddings",
"label": "OpenAIEmbeddings",
"description": "OpenAI API to generate embeddings for a given text",
"type": "OpenAIEmbeddings | Embeddings"
}
],
@ -511,24 +421,24 @@
},
"selected": false,
"positionAbsolute": {
"x": 606.7317612889267,
"y": 439.5269912996025
"x": 116.62153412789377,
"y": 52.465581131402246
},
"dragging": false
},
{
"width": 300,
"height": 555,
"height": 606,
"id": "pinecone_0",
"position": {
"x": 1061.413729190394,
"y": 387.9611693492896
"x": 512.2389361920059,
"y": -36.80102752360557
},
"type": "customNode",
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 2,
"version": 3,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
@ -629,11 +539,20 @@
"name": "embeddings",
"type": "Embeddings",
"id": "pinecone_0-input-embeddings-Embeddings"
},
{
"label": "Record Manager",
"name": "recordManager",
"type": "RecordManager",
"description": "Keep track of the record to prevent duplication",
"optional": true,
"id": "pinecone_0-input-recordManager-RecordManager"
}
],
"inputs": {
"document": "",
"embeddings": "{{openAIEmbeddings_0.data.instance}}",
"recordManager": "",
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
@ -647,17 +566,20 @@
"name": "output",
"label": "Output",
"type": "options",
"description": "",
"options": [
{
"id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever",
"name": "retriever",
"label": "Pinecone Retriever",
"description": "",
"type": "Pinecone | VectorStoreRetriever | BaseRetriever"
},
{
"id": "pinecone_0-output-vectorStore-Pinecone|VectorStore",
"name": "vectorStore",
"label": "Pinecone Vector Store",
"description": "",
"type": "Pinecone | VectorStore"
}
],
@ -671,35 +593,66 @@
},
"selected": false,
"positionAbsolute": {
"x": 1061.413729190394,
"y": 387.9611693492896
"x": 512.2389361920059,
"y": -36.80102752360557
},
"dragging": false
},
{
"id": "stickyNote_0",
"position": {
"x": 1565.5672914362437,
"y": -138.9994972608436
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "An agent that uses long-term memory (Pinecone in this example) together with a prompt for self-guided task completion.\n\nAgent has access to Serp API tool to search the web, and store the continuous results to Pinecone"
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 163,
"selected": false,
"positionAbsolute": {
"x": 1565.5672914362437,
"y": -138.9994972608436
},
"dragging": false
}
],
"edges": [
{
"source": "writeFile_1",
"sourceHandle": "writeFile_1-output-writeFile-WriteFile|Tool|StructuredTool|BaseLangChain",
"target": "autoGPT_0",
"targetHandle": "autoGPT_0-input-tools-Tool",
"type": "buttonedge",
"id": "writeFile_1-writeFile_1-output-writeFile-WriteFile|Tool|StructuredTool|BaseLangChain-autoGPT_0-autoGPT_0-input-tools-Tool",
"data": {
"label": ""
}
},
{
"source": "readFile_0",
"sourceHandle": "readFile_0-output-readFile-ReadFile|Tool|StructuredTool|BaseLangChain",
"target": "autoGPT_0",
"targetHandle": "autoGPT_0-input-tools-Tool",
"type": "buttonedge",
"id": "readFile_0-readFile_0-output-readFile-ReadFile|Tool|StructuredTool|BaseLangChain-autoGPT_0-autoGPT_0-input-tools-Tool",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",

View File

@ -1,11 +1,11 @@
{
"description": "Use BabyAGI to create tasks and reprioritize for a given objective",
"categories": "BabyAGI,ChatOpenAI,Pinecone,Langchain",
"framework": "Langchain",
"framework": ["Langchain"],
"usecases": ["Reflective Agent"],
"nodes": [
{
"width": 300,
"height": 379,
"height": 431,
"id": "babyAGI_1",
"position": {
"x": 950.8042093214954,
@ -79,7 +79,7 @@
},
{
"width": 300,
"height": 329,
"height": 424,
"id": "openAIEmbeddings_0",
"position": {
"x": -111.82510263637522,
@ -89,7 +89,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 3,
"version": 4,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -109,7 +109,7 @@
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "text-embedding-ada-002",
"id": "openAIEmbeddings_0-input-modelName-options"
"id": "openAIEmbeddings_0-input-modelName-asyncOptions"
},
{
"label": "Strip New Lines",
@ -142,21 +142,31 @@
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-basepath-string"
},
{
"label": "Dimensions",
"name": "dimensions",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-dimensions-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "text-embedding-ada-002",
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": "",
"modelName": "text-embedding-ada-002"
"dimensions": ""
},
"outputAnchors": [
{
"id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
"name": "openAIEmbeddings",
"label": "OpenAIEmbeddings",
"description": "OpenAI API to generate embeddings for a given text",
"type": "OpenAIEmbeddings | Embeddings"
}
],
@ -172,17 +182,17 @@
},
{
"width": 300,
"height": 555,
"height": 606,
"id": "pinecone_0",
"position": {
"x": 238.1350223788262,
"y": -133.38073692212225
"x": 245.707825551803,
"y": -176.9243551667388
},
"type": "customNode",
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 2,
"version": 3,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
@ -283,11 +293,20 @@
"name": "embeddings",
"type": "Embeddings",
"id": "pinecone_0-input-embeddings-Embeddings"
},
{
"label": "Record Manager",
"name": "recordManager",
"type": "RecordManager",
"description": "Keep track of the record to prevent duplication",
"optional": true,
"id": "pinecone_0-input-recordManager-RecordManager"
}
],
"inputs": {
"document": "",
"embeddings": "{{openAIEmbeddings_0.data.instance}}",
"recordManager": "",
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
@ -301,17 +320,20 @@
"name": "output",
"label": "Output",
"type": "options",
"description": "",
"options": [
{
"id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever",
"name": "retriever",
"label": "Pinecone Retriever",
"description": "",
"type": "Pinecone | VectorStoreRetriever | BaseRetriever"
},
{
"id": "pinecone_0-output-vectorStore-Pinecone|VectorStore",
"name": "vectorStore",
"label": "Pinecone Vector Store",
"description": "",
"type": "Pinecone | VectorStore"
}
],
@ -325,24 +347,24 @@
},
"selected": false,
"positionAbsolute": {
"x": 238.1350223788262,
"y": -133.38073692212225
"x": 245.707825551803,
"y": -176.9243551667388
},
"dragging": false
},
{
"width": 300,
"height": 574,
"height": 670,
"id": "chatOpenAI_0",
"position": {
"x": 600.5963052289515,
"y": -359.24280496678995
"x": 597.7565040390853,
"y": -381.01461408909825
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6.0,
"version": 6,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -504,8 +526,61 @@
},
"selected": false,
"positionAbsolute": {
"x": 600.5963052289515,
"y": -359.24280496678995
"x": 597.7565040390853,
"y": -381.01461408909825
},
"dragging": false
},
{
"id": "stickyNote_0",
"position": {
"x": 949.0763123880214,
"y": -172.0310628893923
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "BabyAGI is made up of 3 components:\n\n- A chain responsible for creating tasks\n- A chain responsible for prioritising tasks\n- A chain responsible for executing tasks\n\nThese chains are executed in sequence until the task list is empty or the maximum number of iterations is reached"
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 203,
"selected": false,
"positionAbsolute": {
"x": 949.0763123880214,
"y": -172.0310628893923
},
"dragging": false
}

View File

@ -1,87 +1,11 @@
{
"description": "Analyse and summarize CSV data",
"categories": "CSV Agent,ChatOpenAI,Langchain",
"framework": "Langchain",
"usecases": ["Working with tables"],
"framework": ["Langchain"],
"nodes": [
{
"width": 300,
"height": 377,
"id": "csvAgent_0",
"position": {
"x": 1064.0780498701288,
"y": 284.44352695304724
},
"type": "customNode",
"data": {
"id": "csvAgent_0",
"label": "CSV Agent",
"name": "csvAgent",
"version": 3,
"type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain"],
"category": "Agents",
"description": "Agent used to to answer queries on CSV data",
"inputParams": [
{
"label": "Csv File",
"name": "csvFile",
"type": "file",
"fileType": ".csv",
"id": "csvAgent_0-input-csvFile-file"
}
],
"inputAnchors": [
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "csvAgent_0-input-model-BaseLanguageModel"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "csvAgent_0-input-inputModeration-Moderation"
},
{
"label": "Custom Pandas Read_CSV Code",
"description": "Custom Pandas <a target=\"_blank\" href=\"https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\">read_csv</a> function. Takes in an input: \"csv_data\"",
"name": "customReadCSV",
"default": "read_csv(csv_data)",
"type": "code",
"optional": true,
"additionalParams": true,
"id": "csvAgent_0-input-customReadCSV-code"
}
],
"inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}"
},
"outputAnchors": [
{
"id": "csvAgent_0-output-csvAgent-AgentExecutor|BaseChain",
"name": "csvAgent",
"label": "AgentExecutor",
"type": "AgentExecutor | BaseChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1064.0780498701288,
"y": 284.44352695304724
},
"dragging": false
},
{
"width": 300,
"height": 522,
"height": 670,
"id": "chatOpenAI_0",
"position": {
"x": 657.3762197414501,
@ -91,8 +15,8 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6,
"name": "chatOpenAI",
"version": 6.0,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -250,6 +174,148 @@
"y": 220.2950766042332
},
"dragging": false
},
{
"id": "stickyNote_0",
"position": {
"x": 1382.0413608492051,
"y": 331.1861177099975
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "This agent uses the following steps:\n\n1. Convert CSV file to Dataframe object\n\n2. Instruct LLM to generate Python code to answer user question using the dataframe provided\n\n3. Return the result in a natural language response\n\nYou can also specify the system message and custom \"read_csv file\" function. This allows more flexibility of reading CSV file with different delimiter, separator etc."
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 324,
"selected": false,
"positionAbsolute": {
"x": 1382.0413608492051,
"y": 331.1861177099975
},
"dragging": false
},
{
"id": "csvAgent_0",
"position": {
"x": 1040.029472715762,
"y": 293.0369370063613
},
"type": "customNode",
"data": {
"id": "csvAgent_0",
"label": "CSV Agent",
"version": 3,
"name": "csvAgent",
"type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
"category": "Agents",
"description": "Agent used to to answer queries on CSV data",
"inputParams": [
{
"label": "Csv File",
"name": "csvFile",
"type": "file",
"fileType": ".csv",
"id": "csvAgent_0-input-csvFile-file"
},
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"additionalParams": true,
"optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.",
"id": "csvAgent_0-input-systemMessagePrompt-string"
},
{
"label": "Custom Pandas Read_CSV Code",
"description": "Custom Pandas <a target=\"_blank\" href=\"https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\">read_csv</a> function. Takes in an input: \"csv_data\"",
"name": "customReadCSV",
"default": "read_csv(csv_data)",
"type": "code",
"optional": true,
"additionalParams": true,
"id": "csvAgent_0-input-customReadCSV-code"
}
],
"inputAnchors": [
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "csvAgent_0-input-model-BaseLanguageModel"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "csvAgent_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOpenAI_0.data.instance}}",
"systemMessagePrompt": "",
"inputModeration": "",
"customReadCSV": "read_csv(csv_data)"
},
"outputAnchors": [
{
"id": "csvAgent_0-output-csvAgent-AgentExecutor|BaseChain|Runnable",
"name": "csvAgent",
"label": "AgentExecutor",
"description": "Agent used to to answer queries on CSV data",
"type": "AgentExecutor | BaseChain | Runnable"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 464,
"selected": false,
"positionAbsolute": {
"x": 1040.029472715762,
"y": 293.0369370063613
},
"dragging": false
}
],
"edges": [
@ -259,10 +325,7 @@
"target": "csvAgent_0",
"targetHandle": "csvAgent_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-csvAgent_0-csvAgent_0-input-model-BaseLanguageModel",
"data": {
"label": ""
}
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-csvAgent_0-csvAgent_0-input-model-BaseLanguageModel"
}
]
}

View File

@ -1,679 +0,0 @@
{
"description": "Engage with data sources such as YouTube Transcripts, Google, and more through intelligent Q&A interactions",
"categories": "Memory Vector Store,SearchAPI,ChatOpenAI,Conversational Retrieval QA Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
"height": 483,
"id": "conversationalRetrievalQAChain_0",
"position": {
"x": 1499.2693059023254,
"y": 430.03911199833317
},
"type": "customNode",
"data": {
"id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain",
"version": 3,
"name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
"category": "Chains",
"description": "Document QA - built on RetrievalQAChain to provide a chat history component",
"inputParams": [
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
},
{
"label": "Rephrase Prompt",
"name": "rephrasePrompt",
"type": "string",
"description": "Using previous chat history, rephrase question into a standalone question",
"warning": "Prompt must include input variables: {chat_history} and {question}",
"rows": 4,
"additionalParams": true,
"optional": true,
"default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
},
{
"label": "Response Prompt",
"name": "responsePrompt",
"type": "string",
"description": "Taking the rephrased question, search for answer from the provided context",
"warning": "Prompt must include input variable: {context}",
"rows": 4,
"additionalParams": true,
"optional": true,
"default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
"id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
}
],
"inputAnchors": [
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
},
{
"label": "Vector Store Retriever",
"name": "vectorStoreRetriever",
"type": "BaseRetriever",
"id": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseMemory",
"optional": true,
"description": "If left empty, a default BufferMemory will be used",
"id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}",
"memory": "",
"rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
},
"outputAnchors": [
{
"id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
"name": "conversationalRetrievalQAChain",
"label": "ConversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1499.2693059023254,
"y": 430.03911199833317
},
"dragging": false
},
{
"width": 300,
"height": 408,
"id": "memoryVectorStore_0",
"position": {
"x": 1082.0280622332507,
"y": 589.9990964387842
},
"type": "customNode",
"data": {
"id": "memoryVectorStore_0",
"label": "In-Memory Vector Store",
"version": 1,
"name": "memoryVectorStore",
"type": "Memory",
"baseClasses": ["Memory", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
"description": "In-memory vectorstore that stores embeddings and does an exact, linear search for the most similar embeddings.",
"inputParams": [
{
"label": "Top K",
"name": "topK",
"description": "Number of top results to fetch. Default to 4",
"placeholder": "4",
"type": "number",
"optional": true,
"id": "memoryVectorStore_0-input-topK-number"
}
],
"inputAnchors": [
{
"label": "Document",
"name": "document",
"type": "Document",
"list": true,
"id": "memoryVectorStore_0-input-document-Document"
},
{
"label": "Embeddings",
"name": "embeddings",
"type": "Embeddings",
"id": "memoryVectorStore_0-input-embeddings-Embeddings"
}
],
"inputs": {
"document": ["{{searchApi_0.data.instance}}", "{{searchApi_0.data.instance}}", "{{searchApi_0.data.instance}}"],
"embeddings": "{{openAIEmbeddings_0.data.instance}}",
"topK": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "memoryVectorStore_0-output-retriever-Memory|VectorStoreRetriever|BaseRetriever",
"name": "retriever",
"label": "Memory Retriever",
"type": "Memory | VectorStoreRetriever | BaseRetriever"
},
{
"id": "memoryVectorStore_0-output-vectorStore-Memory|VectorStore",
"name": "vectorStore",
"label": "Memory Vector Store",
"type": "Memory | VectorStore"
}
],
"default": "retriever"
}
],
"outputs": {
"output": "retriever"
},
"selected": false
},
"positionAbsolute": {
"x": 1082.0280622332507,
"y": 589.9990964387842
},
"selected": false,
"dragging": false
},
{
"width": 300,
"height": 577,
"id": "chatOpenAI_0",
"position": {
"x": 1056.2788608917747,
"y": -60.59149112477064
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6.0,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "gpt-3.5-turbo",
"id": "chatOpenAI_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-basepath-string"
},
{
"label": "BaseOptions",
"name": "baseOptions",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"modelName": "gpt-3.5-turbo",
"temperature": "0.5",
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1056.2788608917747,
"y": -60.59149112477064
},
"dragging": false
},
{
"width": 300,
"height": 478,
"id": "characterTextSplitter_0",
"position": {
"x": 260.5475803279806,
"y": -65.1647664861618
},
"type": "customNode",
"data": {
"id": "characterTextSplitter_0",
"label": "Character Text Splitter",
"version": 1,
"name": "characterTextSplitter",
"type": "CharacterTextSplitter",
"baseClasses": ["CharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer", "Runnable"],
"category": "Text Splitters",
"description": "splits only on one type of character (defaults to \"\\n\\n\").",
"inputParams": [
{
"label": "Chunk Size",
"name": "chunkSize",
"type": "number",
"default": 1000,
"optional": true,
"id": "characterTextSplitter_0-input-chunkSize-number"
},
{
"label": "Chunk Overlap",
"name": "chunkOverlap",
"type": "number",
"optional": true,
"id": "characterTextSplitter_0-input-chunkOverlap-number"
},
{
"label": "Custom Separator",
"name": "separator",
"type": "string",
"placeholder": "\" \"",
"description": "Seperator to determine when to split the text, will override the default separator",
"optional": true,
"id": "characterTextSplitter_0-input-separator-string"
}
],
"inputAnchors": [],
"inputs": {
"chunkSize": "2000",
"chunkOverlap": "200",
"separator": ""
},
"outputAnchors": [
{
"id": "characterTextSplitter_0-output-characterTextSplitter-CharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable",
"name": "characterTextSplitter",
"label": "CharacterTextSplitter",
"type": "CharacterTextSplitter | TextSplitter | BaseDocumentTransformer | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 260.5475803279806,
"y": -65.1647664861618
},
"dragging": false
},
{
"width": 300,
"height": 332,
"id": "openAIEmbeddings_0",
"position": {
"x": 666.3950526535211,
"y": 777.4191705193945
},
"type": "customNode",
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 3,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
"category": "Embeddings",
"description": "OpenAI API to generate embeddings for a given text",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "text-embedding-ada-002",
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
"type": "boolean",
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-stripNewLines-boolean"
},
{
"label": "Batch Size",
"name": "batchSize",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-batchSize-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputs": {
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
"id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
"name": "openAIEmbeddings",
"label": "OpenAIEmbeddings",
"type": "OpenAIEmbeddings | Embeddings"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 666.3950526535211,
"y": 777.4191705193945
}
},
{
"width": 300,
"height": 482,
"id": "searchApi_0",
"position": {
"x": 680.1258121447145,
"y": 144.9905217023999
},
"type": "customNode",
"data": {
"id": "searchApi_0",
"label": "SearchApi",
"version": 1,
"name": "searchApi",
"type": "Document",
"baseClasses": ["Document"],
"category": "Document Loaders",
"description": "Load data from real-time search results",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"optional": false,
"credentialNames": ["searchApi"],
"id": "searchApi_0-input-credential-credential"
},
{
"label": "Query",
"name": "query",
"type": "string",
"optional": true,
"id": "searchApi_0-input-query-string"
},
{
"label": "Custom Parameters",
"name": "customParameters",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "searchApi_0-input-customParameters-json"
},
{
"label": "Metadata",
"name": "metadata",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "searchApi_0-input-metadata-json"
}
],
"inputAnchors": [
{
"label": "Text Splitter",
"name": "textSplitter",
"type": "TextSplitter",
"optional": true,
"id": "searchApi_0-input-textSplitter-TextSplitter"
}
],
"inputs": {
"query": "",
"customParameters": "{\"engine\":\"youtube_transcripts\",\"video_id\":\"0e3GPea1Tyg\"}",
"textSplitter": "{{characterTextSplitter_0.data.instance}}",
"metadata": ""
},
"outputAnchors": [
{
"id": "searchApi_0-output-searchApi-Document",
"name": "searchApi",
"label": "Document",
"type": "Document"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 680.1258121447145,
"y": 144.9905217023999
},
"dragging": false
}
],
"edges": [
{
"source": "memoryVectorStore_0",
"sourceHandle": "memoryVectorStore_0-output-retriever-Memory|VectorStoreRetriever|BaseRetriever",
"target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
"type": "buttonedge",
"id": "memoryVectorStore_0-memoryVectorStore_0-output-retriever-Memory|VectorStoreRetriever|BaseRetriever-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"data": {
"label": ""
}
},
{
"source": "openAIEmbeddings_0",
"sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
"target": "memoryVectorStore_0",
"targetHandle": "memoryVectorStore_0-input-embeddings-Embeddings",
"type": "buttonedge",
"id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-memoryVectorStore_0-memoryVectorStore_0-input-embeddings-Embeddings",
"data": {
"label": ""
}
},
{
"source": "characterTextSplitter_0",
"sourceHandle": "characterTextSplitter_0-output-characterTextSplitter-CharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable",
"target": "searchApi_0",
"targetHandle": "searchApi_0-input-textSplitter-TextSplitter",
"type": "buttonedge",
"id": "characterTextSplitter_0-characterTextSplitter_0-output-characterTextSplitter-CharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable-searchApi_0-searchApi_0-input-textSplitter-TextSplitter",
"data": {
"label": ""
}
},
{
"source": "searchApi_0",
"sourceHandle": "searchApi_0-output-searchApi-Document",
"target": "memoryVectorStore_0",
"targetHandle": "memoryVectorStore_0-input-document-Document",
"type": "buttonedge",
"id": "searchApi_0-searchApi_0-output-searchApi-Document-memoryVectorStore_0-memoryVectorStore_0-input-document-Document",
"data": {
"label": ""
}
}
]
}

View File

@ -1,459 +0,0 @@
{
"description": "Use Anthropic Claude with 200k context window to ingest whole document for QnA",
"categories": "Buffer Memory,Prompt Template,Conversation Chain,ChatAnthropic,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
"height": 376,
"id": "bufferMemory_0",
"position": {
"x": 240.5161028076149,
"y": 165.35849026339048
},
"type": "customNode",
"data": {
"id": "bufferMemory_0",
"label": "Buffer Memory",
"version": 2,
"name": "bufferMemory",
"type": "BufferMemory",
"baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"],
"category": "Memory",
"description": "Retrieve chat messages stored in database",
"inputParams": [
{
"label": "Session Id",
"name": "sessionId",
"type": "string",
"description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory#ui-and-embedded-chat\">more</a>",
"default": "",
"additionalParams": true,
"optional": true,
"id": "bufferMemory_0-input-sessionId-string"
},
{
"label": "Memory Key",
"name": "memoryKey",
"type": "string",
"default": "chat_history",
"additionalParams": true,
"id": "bufferMemory_0-input-memoryKey-string"
}
],
"inputAnchors": [],
"inputs": {
"sessionId": "",
"memoryKey": "chat_history"
},
"outputAnchors": [
{
"id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"name": "bufferMemory",
"label": "BufferMemory",
"type": "BufferMemory | BaseChatMemory | BaseMemory"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 240.5161028076149,
"y": 165.35849026339048
},
"dragging": false
},
{
"width": 300,
"height": 383,
"id": "conversationChain_0",
"position": {
"x": 958.9887390513221,
"y": 318.8734467468765
},
"type": "customNode",
"data": {
"id": "conversationChain_0",
"label": "Conversation Chain",
"version": 3,
"name": "conversationChain",
"type": "ConversationChain",
"baseClasses": ["ConversationChain", "LLMChain", "BaseChain", "Runnable"],
"category": "Chains",
"description": "Chat models specific conversational chain with memory",
"inputParams": [
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"description": "If Chat Prompt Template is provided, this will be ignored",
"additionalParams": true,
"optional": true,
"default": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"placeholder": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"id": "conversationChain_0-input-systemMessagePrompt-string"
}
],
"inputAnchors": [
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"id": "conversationChain_0-input-model-BaseChatModel"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseMemory",
"id": "conversationChain_0-input-memory-BaseMemory"
},
{
"label": "Chat Prompt Template",
"name": "chatPromptTemplate",
"type": "ChatPromptTemplate",
"description": "Override existing prompt with Chat Prompt Template. Human Message must includes {input} variable",
"optional": true,
"id": "conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"inputModeration": "",
"model": "{{chatAnthropic_0.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}",
"chatPromptTemplate": "{{chatPromptTemplate_0.data.instance}}",
"systemMessagePrompt": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."
},
"outputAnchors": [
{
"id": "conversationChain_0-output-conversationChain-ConversationChain|LLMChain|BaseChain|Runnable",
"name": "conversationChain",
"label": "ConversationChain",
"type": "ConversationChain | LLMChain | BaseChain | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 958.9887390513221,
"y": 318.8734467468765
},
"dragging": false
},
{
"width": 300,
"height": 574,
"id": "chatAnthropic_0",
"position": {
"x": 585.3308245972187,
"y": -116.32789506560908
},
"type": "customNode",
"data": {
"id": "chatAnthropic_0",
"label": "ChatAnthropic",
"version": 6.0,
"name": "chatAnthropic",
"type": "ChatAnthropic",
"baseClasses": ["ChatAnthropic", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around ChatAnthropic large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["anthropicApi"],
"id": "chatAnthropic_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "claude-3-haiku",
"id": "chatAnthropic_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatAnthropic_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokensToSample",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_0-input-maxTokensToSample-number"
},
{
"label": "Top P",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_0-input-topP-number"
},
{
"label": "Top K",
"name": "topK",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_0-input-topK-number"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses claude-3-* models when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatAnthropic_0-input-allowImageUploads-boolean"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatAnthropic_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"modelName": "claude-3-haiku",
"temperature": 0.9,
"maxTokensToSample": "",
"topP": "",
"topK": "",
"allowImageUploads": true
},
"outputAnchors": [
{
"id": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatAnthropic",
"label": "ChatAnthropic",
"type": "ChatAnthropic | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 585.3308245972187,
"y": -116.32789506560908
},
"dragging": false
},
{
"width": 300,
"height": 688,
"id": "chatPromptTemplate_0",
"position": {
"x": -106.44189698270114,
"y": 20.133956087516538
},
"type": "customNode",
"data": {
"id": "chatPromptTemplate_0",
"label": "Chat Prompt Template",
"version": 1,
"name": "chatPromptTemplate",
"type": "ChatPromptTemplate",
"baseClasses": ["ChatPromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "Runnable"],
"category": "Prompts",
"description": "Schema to represent a chat prompt",
"inputParams": [
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"placeholder": "You are a helpful assistant that translates {input_language} to {output_language}.",
"id": "chatPromptTemplate_0-input-systemMessagePrompt-string"
},
{
"label": "Human Message",
"name": "humanMessagePrompt",
"type": "string",
"rows": 4,
"placeholder": "{text}",
"id": "chatPromptTemplate_0-input-humanMessagePrompt-string"
},
{
"label": "Format Prompt Values",
"name": "promptValues",
"type": "json",
"optional": true,
"acceptVariable": true,
"list": true,
"id": "chatPromptTemplate_0-input-promptValues-json"
}
],
"inputAnchors": [],
"inputs": {
"systemMessagePrompt": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\nThe AI has the following context:\n{context}",
"humanMessagePrompt": "{input}",
"promptValues": "{\"context\":\"{{plainText_0.data.instance}}\",\"input\":\"{{question}}\"}"
},
"outputAnchors": [
{
"id": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable",
"name": "chatPromptTemplate",
"label": "ChatPromptTemplate",
"type": "ChatPromptTemplate | BaseChatPromptTemplate | BasePromptTemplate | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": -106.44189698270114,
"y": 20.133956087516538
},
"dragging": false
},
{
"width": 300,
"height": 485,
"id": "plainText_0",
"position": {
"x": -487.7511991135089,
"y": 77.83838996645807
},
"type": "customNode",
"data": {
"id": "plainText_0",
"label": "Plain Text",
"version": 2,
"name": "plainText",
"type": "Document",
"baseClasses": ["Document"],
"category": "Document Loaders",
"description": "Load data from plain text",
"inputParams": [
{
"label": "Text",
"name": "text",
"type": "string",
"rows": 4,
"placeholder": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua...",
"id": "plainText_0-input-text-string"
},
{
"label": "Metadata",
"name": "metadata",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "plainText_0-input-metadata-json"
}
],
"inputAnchors": [
{
"label": "Text Splitter",
"name": "textSplitter",
"type": "TextSplitter",
"optional": true,
"id": "plainText_0-input-textSplitter-TextSplitter"
}
],
"inputs": {
"text": "Welcome to Skyworld Hotel, where your dreams take flight and your stay soars to new heights. Nestled amidst breathtaking cityscape views, our upscale establishment offers an unparalleled blend of luxury and comfort. Our rooms are elegantly appointed, featuring modern amenities and plush furnishings to ensure your relaxation.\n\nIndulge in culinary delights at our rooftop restaurant, offering a gastronomic journey with panoramic vistas. Skyworld Hotel boasts state-of-the-art conference facilities, perfect for business travelers, and an inviting spa for relaxation seekers. Our attentive staff is dedicated to ensuring your every need is met, making your stay memorable.\n\nCentrally located, we offer easy access to local attractions, making us an ideal choice for both leisure and business travelers. Experience the world of hospitality like never before at Skyworld Hotel.",
"textSplitter": "",
"metadata": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "plainText_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document | json"
},
{
"id": "plainText_0-output-text-string|json",
"name": "text",
"label": "Text",
"type": "string | json"
}
],
"default": "document"
}
],
"outputs": {
"output": "text"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": -487.7511991135089,
"y": 77.83838996645807
},
"dragging": false
}
],
"edges": [
{
"source": "bufferMemory_0",
"sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-memory-BaseMemory",
"type": "buttonedge",
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationChain_0-conversationChain_0-input-memory-BaseMemory"
},
{
"source": "chatAnthropic_0",
"sourceHandle": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatAnthropic_0-chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel|Runnable-conversationChain_0-conversationChain_0-input-model-BaseChatModel"
},
{
"source": "plainText_0",
"sourceHandle": "plainText_0-output-text-string|json",
"target": "chatPromptTemplate_0",
"targetHandle": "chatPromptTemplate_0-input-promptValues-json",
"type": "buttonedge",
"id": "plainText_0-plainText_0-output-text-string|json-chatPromptTemplate_0-chatPromptTemplate_0-input-promptValues-json"
},
{
"source": "chatPromptTemplate_0",
"sourceHandle": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable",
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate",
"type": "buttonedge",
"id": "chatPromptTemplate_0-chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable-conversationChain_0-conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate"
}
]
}

View File

@ -1,8 +1,8 @@
{
"description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex",
"categories": "Text File,Prompt Template,ChatOpenAI,Conversation Chain,Pinecone,LlamaIndex,Redis",
"framework": "LlamaIndex",
"badge": "NEW",
"description": "Answer question based on retrieved documents (context) while remembering previous conversations",
"framework": ["LlamaIndex"],
"badge": "POPULAR",
"usecases": ["Documents QnA"],
"nodes": [
{
"width": 300,

View File

@ -1,7 +1,7 @@
{
"description": "Basic example of Conversation Chain with built-in memory - works exactly like ChatGPT",
"categories": "Buffer Memory,ChatOpenAI,Conversation Chain,Langchain",
"framework": "Langchain",
"usecases": ["Chatbot"],
"framework": ["Langchain"],
"badge": "POPULAR",
"nodes": [
{

View File

@ -1,7 +1,7 @@
{
"description": "A conversational agent for a chat model which utilize chat specific prompts",
"categories": "Calculator Tool,Buffer Memory,SerpAPI,ChatOpenAI,Conversational Agent,Langchain",
"framework": "Langchain",
"description": "A conversational agent designed to use tools and chat model to provide responses",
"usecases": ["Agent"],
"framework": ["Langchain"],
"nodes": [
{
"width": 300,
@ -44,7 +44,7 @@
},
{
"width": 300,
"height": 376,
"height": 253,
"id": "bufferMemory_1",
"position": {
"x": 607.6260576768354,
@ -105,7 +105,7 @@
},
{
"width": 300,
"height": 277,
"height": 276,
"id": "serpAPI_0",
"position": {
"x": 451.83740798447855,
@ -152,7 +152,7 @@
},
{
"width": 300,
"height": 574,
"height": 670,
"id": "chatOpenAI_0",
"position": {
"x": 97.01321406237057,
@ -162,7 +162,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6.0,
"version": 6,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -324,7 +324,7 @@
},
{
"width": 300,
"height": 383,
"height": 435,
"id": "conversationalAgent_0",
"position": {
"x": 1191.1524476753796,
@ -414,6 +414,59 @@
"y": 324.2479396683294
},
"dragging": false
},
{
"id": "stickyNote_0",
"position": {
"x": 1190.081066428271,
"y": 21.014152635796393
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "This agent works very similar to Tool Agent with slightly higher error rate.\n\nDifference being this agent uses prompt to instruct LLM using tools, as opposed to using LLM's function calling capability.\n\nFor LLMs that support function calling, it is recommended to use Tool Agent.\n\nExample question:\n1. What is the net worth of Elon Musk?\n2. Multiply the net worth by 2"
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 284,
"selected": false,
"positionAbsolute": {
"x": 1190.081066428271,
"y": 21.014152635796393
},
"dragging": false
}
],
"edges": [

View File

@ -1,12 +1,12 @@
{
"description": "Text file QnA using conversational retrieval QA chain",
"categories": "TextFile,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain",
"description": "Documents QnA using Retrieval Augmented Generation (RAG) with Mistral and FAISS for similarity search",
"badge": "POPULAR",
"framework": "Langchain",
"usecases": ["Documents QnA"],
"framework": ["Langchain"],
"nodes": [
{
"width": 300,
"height": 329,
"height": 424,
"id": "openAIEmbeddings_0",
"position": {
"x": 795.6162477805387,
@ -16,7 +16,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 3,
"version": 4,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -36,7 +36,7 @@
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "text-embedding-ada-002",
"id": "openAIEmbeddings_0-input-modelName-options"
"id": "openAIEmbeddings_0-input-modelName-asyncOptions"
},
{
"label": "Strip New Lines",
@ -69,21 +69,31 @@
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-basepath-string"
},
{
"label": "Dimensions",
"name": "dimensions",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-dimensions-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "text-embedding-ada-002",
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": "",
"modelName": "text-embedding-ada-002"
"dimensions": ""
},
"outputAnchors": [
{
"id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
"name": "openAIEmbeddings",
"label": "OpenAIEmbeddings",
"description": "OpenAI API to generate embeddings for a given text",
"type": "OpenAIEmbeddings | Embeddings"
}
],
@ -99,7 +109,7 @@
},
{
"width": 300,
"height": 429,
"height": 430,
"id": "recursiveCharacterTextSplitter_0",
"position": {
"x": 406.08456707531263,
@ -168,7 +178,7 @@
},
{
"width": 300,
"height": 419,
"height": 421,
"id": "textFile_0",
"position": {
"x": 786.5497697231324,
@ -250,7 +260,7 @@
},
{
"width": 300,
"height": 480,
"height": 532,
"id": "conversationalRetrievalQAChain_0",
"position": {
"x": 1558.6564094656787,
@ -332,8 +342,8 @@
],
"inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{pinecone_0.data.instance}}",
"model": "{{chatMistralAI_0.data.instance}}",
"vectorStoreRetriever": "{{faiss_0.data.instance}}",
"memory": "",
"rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
@ -353,235 +363,33 @@
"x": 1558.6564094656787,
"y": 386.60217819991124
},
"selected": false
},
{
"width": 300,
"height": 574,
"id": "chatOpenAI_0",
"position": {
"x": 1194.3554779412727,
"y": -46.74877201166788
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6.0,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "gpt-3.5-turbo",
"id": "chatOpenAI_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-basepath-string"
},
{
"label": "BaseOptions",
"name": "baseOptions",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"modelName": "gpt-3.5-turbo-16k",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1194.3554779412727,
"y": -46.74877201166788
},
"dragging": false
},
{
"width": 300,
"height": 555,
"id": "pinecone_0",
"id": "faiss_0",
"position": {
"x": 1192.4771449209463,
"y": 552.43946147251
"x": 1193.61786387649,
"y": 559.055052045731
},
"type": "customNode",
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 2,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"id": "faiss_0",
"label": "Faiss",
"version": 1,
"name": "faiss",
"type": "Faiss",
"baseClasses": ["Faiss", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"description": "Upsert embedded data and perform similarity search upon query using Faiss library from Meta",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["pineconeApi"],
"id": "pinecone_0-input-credential-credential"
},
{
"label": "Pinecone Index",
"name": "pineconeIndex",
"label": "Base Path to load",
"name": "basePath",
"description": "Path to load faiss.index file",
"placeholder": "C:\\Users\\User\\Desktop",
"type": "string",
"id": "pinecone_0-input-pineconeIndex-string"
},
{
"label": "Pinecone Namespace",
"name": "pineconeNamespace",
"type": "string",
"placeholder": "my-first-namespace",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-pineconeNamespace-string"
},
{
"label": "Pinecone Metadata Filter",
"name": "pineconeMetadataFilter",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "pinecone_0-input-pineconeMetadataFilter-json"
"id": "faiss_0-input-basePath-string"
},
{
"label": "Top K",
@ -591,46 +399,7 @@
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
},
{
"label": "Search Type",
"name": "searchType",
"type": "options",
"default": "similarity",
"options": [
{
"label": "Similarity",
"name": "similarity"
},
{
"label": "Max Marginal Relevance",
"name": "mmr"
}
],
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-searchType-options"
},
{
"label": "Fetch K (for MMR Search)",
"name": "fetchK",
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
"placeholder": "20",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-fetchK-number"
},
{
"label": "Lambda (for MMR Search)",
"name": "lambda",
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
"placeholder": "0.5",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-lambda-number"
"id": "faiss_0-input-topK-number"
}
],
"inputAnchors": [
@ -640,43 +409,41 @@
"type": "Document",
"list": true,
"optional": true,
"id": "pinecone_0-input-document-Document"
"id": "faiss_0-input-document-Document"
},
{
"label": "Embeddings",
"name": "embeddings",
"type": "Embeddings",
"id": "pinecone_0-input-embeddings-Embeddings"
"id": "faiss_0-input-embeddings-Embeddings"
}
],
"inputs": {
"document": ["{{textFile_0.data.instance}}"],
"document": ["{{textFile_0.data.instance}}", "{{documentStore_0.data.instance}}"],
"embeddings": "{{openAIEmbeddings_0.data.instance}}",
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": "",
"searchType": "similarity",
"fetchK": "",
"lambda": ""
"basePath": "",
"topK": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"description": "",
"options": [
{
"id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever",
"id": "faiss_0-output-retriever-Faiss|VectorStoreRetriever|BaseRetriever",
"name": "retriever",
"label": "Pinecone Retriever",
"type": "Pinecone | VectorStoreRetriever | BaseRetriever"
"label": "Faiss Retriever",
"description": "",
"type": "Faiss | VectorStoreRetriever | BaseRetriever"
},
{
"id": "pinecone_0-output-vectorStore-Pinecone|VectorStore",
"id": "faiss_0-output-vectorStore-Faiss|SaveableVectorStore|VectorStore",
"name": "vectorStore",
"label": "Pinecone Vector Store",
"type": "Pinecone | VectorStore"
"label": "Faiss Vector Store",
"description": "",
"type": "Faiss | SaveableVectorStore | VectorStore"
}
],
"default": "retriever"
@ -687,12 +454,265 @@
},
"selected": false
},
"width": 300,
"height": 459,
"selected": false,
"positionAbsolute": {
"x": 1192.4771449209463,
"y": 552.43946147251
"x": 1193.61786387649,
"y": 559.055052045731
},
"dragging": false
},
{
"id": "documentStore_0",
"position": {
"x": 785.3020265031932,
"y": -215.72424937010018
},
"type": "customNode",
"data": {
"id": "documentStore_0",
"label": "Document Store",
"version": 1,
"name": "documentStore",
"type": "Document",
"baseClasses": ["Document"],
"category": "Document Loaders",
"description": "Load data from pre-configured document stores",
"inputParams": [
{
"label": "Select Store",
"name": "selectedStore",
"type": "asyncOptions",
"loadMethod": "listStores",
"id": "documentStore_0-input-selectedStore-asyncOptions"
}
],
"inputAnchors": [],
"inputs": {
"selectedStore": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"description": "Array of document objects containing metadata and pageContent",
"options": [
{
"id": "documentStore_0-output-document-Document|json",
"name": "document",
"label": "Document",
"description": "Array of document objects containing metadata and pageContent",
"type": "Document | json"
},
{
"id": "documentStore_0-output-text-string|json",
"name": "text",
"label": "Text",
"description": "Concatenated string from pageContent of documents",
"type": "string | json"
}
],
"default": "document"
}
],
"outputs": {
"output": "document"
},
"selected": false
},
"width": 300,
"height": 312,
"selected": false,
"positionAbsolute": {
"x": 785.3020265031932,
"y": -215.72424937010018
},
"dragging": false
},
{
"id": "stickyNote_0",
"position": {
"x": 1546.6369661154768,
"y": -107.3962162381467
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "Conversational Retrieval QA Chain composes of 2 chains:\n\n1. A chain to rephrase user question using previous conversations\n2. A chain to provide response based on the context fetched from vector store.\n\nWhy is the need for rephrasing question?\nThis is to ensure that a follow-up question can be asked. For example:\n\n- What is the address of the Bakery shop?\n- What about the opening time?\n\nA rephrased question will be:\n- What is the opening time of the Bakery shop?\n\nThis ensure a better search to vector store, hence better output quality.\n"
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 465,
"selected": false,
"positionAbsolute": {
"x": 1546.6369661154768,
"y": -107.3962162381467
},
"dragging": false
},
{
"id": "chatMistralAI_0",
"position": {
"x": 1185.9624817228073,
"y": -60.75719138037451
},
"type": "customNode",
"data": {
"id": "chatMistralAI_0",
"label": "ChatMistralAI",
"version": 3,
"name": "chatMistralAI",
"type": "ChatMistralAI",
"baseClasses": ["ChatMistralAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around Mistral large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["mistralAIApi"],
"id": "chatMistralAI_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "mistral-tiny",
"id": "chatMistralAI_0-input-modelName-asyncOptions"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"description": "What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatMistralAI_0-input-temperature-number"
},
{
"label": "Max Output Tokens",
"name": "maxOutputTokens",
"type": "number",
"description": "The maximum number of tokens to generate in the completion.",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatMistralAI_0-input-maxOutputTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"description": "Nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatMistralAI_0-input-topP-number"
},
{
"label": "Random Seed",
"name": "randomSeed",
"type": "number",
"description": "The seed to use for random sampling. If set, different calls will generate deterministic results.",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatMistralAI_0-input-randomSeed-number"
},
{
"label": "Safe Mode",
"name": "safeMode",
"type": "boolean",
"description": "Whether to inject a safety prompt before all conversations.",
"optional": true,
"additionalParams": true,
"id": "chatMistralAI_0-input-safeMode-boolean"
},
{
"label": "Override Endpoint",
"name": "overrideEndpoint",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "chatMistralAI_0-input-overrideEndpoint-string"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatMistralAI_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"modelName": "mistral-tiny",
"temperature": 0.9,
"maxOutputTokens": "",
"topP": "",
"randomSeed": "",
"safeMode": "",
"overrideEndpoint": ""
},
"outputAnchors": [
{
"id": "chatMistralAI_0-output-chatMistralAI-ChatMistralAI|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatMistralAI",
"label": "ChatMistralAI",
"description": "Wrapper around Mistral large language models that use the Chat endpoint",
"type": "ChatMistralAI | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 574,
"positionAbsolute": {
"x": 1185.9624817228073,
"y": -60.75719138037451
},
"selected": false,
"dragging": false
}
],
"edges": [
@ -710,46 +730,42 @@
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "pinecone_0",
"targetHandle": "pinecone_0-input-document-Document",
"target": "faiss_0",
"targetHandle": "faiss_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document",
"data": {
"label": ""
}
"id": "textFile_0-textFile_0-output-document-Document|json-faiss_0-faiss_0-input-document-Document"
},
{
"source": "openAIEmbeddings_0",
"sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
"target": "pinecone_0",
"targetHandle": "pinecone_0-input-embeddings-Embeddings",
"target": "faiss_0",
"targetHandle": "faiss_0-input-embeddings-Embeddings",
"type": "buttonedge",
"id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_0-pinecone_0-input-embeddings-Embeddings",
"data": {
"label": ""
}
"id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-faiss_0-faiss_0-input-embeddings-Embeddings"
},
{
"source": "pinecone_0",
"sourceHandle": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever",
"source": "documentStore_0",
"sourceHandle": "documentStore_0-output-document-Document|json",
"target": "faiss_0",
"targetHandle": "faiss_0-input-document-Document",
"type": "buttonedge",
"id": "documentStore_0-documentStore_0-output-document-Document|json-faiss_0-faiss_0-input-document-Document"
},
{
"source": "faiss_0",
"sourceHandle": "faiss_0-output-retriever-Faiss|VectorStoreRetriever|BaseRetriever",
"target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
"type": "buttonedge",
"id": "pinecone_0-pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
"data": {
"label": ""
}
"id": "faiss_0-faiss_0-output-retriever-Faiss|VectorStoreRetriever|BaseRetriever-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever"
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"source": "chatMistralAI_0",
"sourceHandle": "chatMistralAI_0-output-chatMistralAI-ChatMistralAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"data": {
"label": ""
}
"id": "chatMistralAI_0-chatMistralAI_0-output-chatMistralAI-ChatMistralAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel"
}
]
}

View File

@ -1,12 +1,12 @@
{
"description": "Flowise Docs Github QnA using conversational retrieval QA chain",
"categories": "Memory Vector Store,Github Loader,ChatOpenAI,Conversational Retrieval QA Chain,Langchain",
"description": "Flowise Docs Github QnA using Retrieval Augmented Generation (RAG)",
"badge": "POPULAR",
"framework": "Langchain",
"usecases": ["Documents QnA"],
"framework": ["Langchain"],
"nodes": [
{
"width": 300,
"height": 376,
"height": 378,
"id": "markdownTextSplitter_0",
"position": {
"x": 1081.1540334344143,
@ -16,8 +16,8 @@
"data": {
"id": "markdownTextSplitter_0",
"label": "Markdown Text Splitter",
"name": "markdownTextSplitter",
"version": 1,
"name": "markdownTextSplitter",
"type": "MarkdownTextSplitter",
"baseClasses": ["MarkdownTextSplitter", "RecursiveCharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer"],
"category": "Text Splitters",
@ -64,7 +64,7 @@
},
{
"width": 300,
"height": 405,
"height": 407,
"id": "memoryVectorStore_0",
"position": {
"x": 1844.88052464165,
@ -74,8 +74,8 @@
"data": {
"id": "memoryVectorStore_0",
"label": "In-Memory Vector Store",
"name": "memoryVectorStore",
"version": 1,
"name": "memoryVectorStore",
"type": "Memory",
"baseClasses": ["Memory", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
@ -147,18 +147,18 @@
},
{
"width": 300,
"height": 479,
"height": 532,
"id": "conversationalRetrievalQAChain_0",
"position": {
"x": 2311.697827287373,
"y": 228.14841720207832
"x": 2262.1986022669694,
"y": 229.38589782758842
},
"type": "customNode",
"data": {
"id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain",
"name": "conversationalRetrievalQAChain",
"version": 3,
"name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
"category": "Chains",
@ -250,8 +250,8 @@
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 2311.697827287373,
"y": 228.14841720207832
"x": 2262.1986022669694,
"y": 229.38589782758842
}
},
{
@ -266,8 +266,8 @@
"data": {
"id": "github_0",
"label": "Github",
"name": "github",
"version": 2,
"name": "github",
"type": "Document",
"baseClasses": ["Document"],
"category": "Document Loaders",
@ -377,18 +377,18 @@
},
{
"width": 300,
"height": 522,
"height": 670,
"id": "chatOpenAI_0",
"position": {
"x": 1857.367353502965,
"y": -104.25095383414119
"x": 1848.10147093022,
"y": -213.12507406389523
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 6,
"name": "chatOpenAI",
"version": 6.0,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -542,25 +542,25 @@
},
"selected": false,
"positionAbsolute": {
"x": 1857.367353502965,
"y": -104.25095383414119
"x": 1848.10147093022,
"y": -213.12507406389523
},
"dragging": false
},
{
"width": 300,
"height": 328,
"height": 424,
"id": "openAIEmbeddings_0",
"position": {
"x": 1299.9983863833309,
"y": 581.8406384863323
"x": 1114.6807349284306,
"y": 482.2324008293234
},
"type": "customNode",
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 4,
"name": "openAIEmbeddings",
"version": 3,
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
"category": "Embeddings",
@ -579,7 +579,7 @@
"type": "asyncOptions",
"loadMethod": "listModels",
"default": "text-embedding-ada-002",
"id": "openAIEmbeddings_0-input-modelName-options"
"id": "openAIEmbeddings_0-input-modelName-asyncOptions"
},
{
"label": "Strip New Lines",
@ -612,21 +612,31 @@
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-basepath-string"
},
{
"label": "Dimensions",
"name": "dimensions",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbeddings_0-input-dimensions-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "text-embedding-ada-002",
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": "",
"modelName": "text-embedding-ada-002"
"dimensions": ""
},
"outputAnchors": [
{
"id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
"name": "openAIEmbeddings",
"label": "OpenAIEmbeddings",
"description": "OpenAI API to generate embeddings for a given text",
"type": "OpenAIEmbeddings | Embeddings"
}
],
@ -636,9 +646,168 @@
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 1299.9983863833309,
"y": 581.8406384863323
"x": 1114.6807349284306,
"y": 482.2324008293234
}
},
{
"id": "stickyNote_0",
"position": {
"x": 1119.05414840041,
"y": 304.34680059348875
},
"type": "stickyNote",
"data": {
"id": "stickyNote_0",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_0-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "Recursively load files from Github repo, and split into chunks according to Markdown syntax.\n\nFor private repo, you need to connect Github credential."
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 143,
"selected": false,
"positionAbsolute": {
"x": 1119.05414840041,
"y": 304.34680059348875
},
"dragging": false
},
{
"id": "stickyNote_1",
"position": {
"x": 1481.99061810943,
"y": 600.8550429213293
},
"type": "stickyNote",
"data": {
"id": "stickyNote_1",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_1-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "Store the embeddings in-memory"
},
"outputAnchors": [
{
"id": "stickyNote_1-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 42,
"selected": false,
"positionAbsolute": {
"x": 1481.99061810943,
"y": 600.8550429213293
},
"dragging": false
},
{
"id": "stickyNote_2",
"position": {
"x": 2599.168985347108,
"y": 244.87044713398404
},
"type": "stickyNote",
"data": {
"id": "stickyNote_2",
"label": "Sticky Note",
"version": 2,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"tags": ["Utilities"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_2-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "Conversational Retrieval QA Chain composes of 2 chains:\n\n1. A chain to rephrase user question using previous conversations\n2. A chain to provide response based on the context fetched from vector store.\n\nWhy is the need for rephrasing question?\nThis is to ensure that a follow-up question can be asked. For example:\n\n- What is the address of the Bakery shop?\n- What about the opening time?\n\nA rephrased question will be:\n- What is the opening time of the Bakery shop?\n\nThis ensure a better search to vector store, hence better output quality.\n"
},
"outputAnchors": [
{
"id": "stickyNote_2-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 465,
"selected": false,
"positionAbsolute": {
"x": 2599.168985347108,
"y": 244.87044713398404
},
"dragging": false
}
],
"edges": [

View File

@ -1,7 +1,7 @@
{
"description": "Simple LLM Chain using HuggingFace Inference API on falcon-7b-instruct model",
"categories": "HuggingFace,LLM Chain,Langchain",
"framework": "Langchain",
"framework": ["Langchain"],
"usecases": ["Basic"],
"nodes": [
{
"width": 300,

Some files were not shown because too many files have changed in this diff Show More