Merge branch 'main' into chore/Upgrade-LC-version

pull/1691/head
Henry 2024-02-19 17:39:32 +08:00
commit 5a45a99620
94 changed files with 2265 additions and 702 deletions

View File

@ -123,6 +123,7 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
| FLOWISE_USERNAME | 登录用户名 | 字符串 | |
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb |
| DEBUG | 打印组件的日志 | 布尔值 | |
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |

View File

@ -127,6 +127,7 @@ Flowise support different environment variables to configure your instance. You
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |

View File

@ -21,6 +21,7 @@ LOG_PATH=/root/.flowise/logs
# FLOWISE_USERNAME=user
# FLOWISE_PASSWORD=1234
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
# FLOWISE_FILE_SIZE_LIMIT=50mb
# DEBUG=true
# LOG_LEVEL=debug (error | warn | info | verbose | debug)
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs

View File

@ -10,6 +10,7 @@ services:
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
- DEBUG=${DEBUG}
- DATABASE_PATH=${DATABASE_PATH}
- DATABASE_TYPE=${DATABASE_TYPE}

View File

@ -59,6 +59,10 @@ class ChatOpenAI_ChatModels implements INode {
label: 'gpt-4-1106-preview',
name: 'gpt-4-1106-preview'
},
{
label: 'gpt-4-1106-vision-preview',
name: 'gpt-4-1106-vision-preview'
},
{
label: 'gpt-4-vision-preview',
name: 'gpt-4-vision-preview'

View File

@ -126,7 +126,9 @@ class Cheerio_DocumentLoaders implements INode {
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
// if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined
// so when limit is 0 we can fetch all the links
if (limit === null || limit === undefined) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
selectedLinks && selectedLinks.length > 0
@ -143,7 +145,7 @@ class Cheerio_DocumentLoaders implements INode {
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
for (const page of selectedLinks.slice(0, limit)) {
docs.push(...(await cheerioLoader(page)))
}
} else {

View File

@ -51,11 +51,13 @@ class PlainText_DocumentLoaders implements INode {
{
label: 'Document',
name: 'document',
baseClasses: this.baseClasses
description: 'Array of document objects containing metadata and pageContent',
baseClasses: [...this.baseClasses, 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -167,7 +167,9 @@ class Playwright_DocumentLoaders implements INode {
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
// if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined
// so when limit is 0 we can fetch all the links
if (limit === null || limit === undefined) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
selectedLinks && selectedLinks.length > 0
@ -184,7 +186,7 @@ class Playwright_DocumentLoaders implements INode {
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
for (const page of selectedLinks.slice(0, limit)) {
docs.push(...(await playwrightLoader(page)))
}
} else {

View File

@ -168,7 +168,9 @@ class Puppeteer_DocumentLoaders implements INode {
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
// if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined
// so when limit is 0 we can fetch all the links
if (limit === null || limit === undefined) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
selectedLinks && selectedLinks.length > 0
@ -185,7 +187,7 @@ class Puppeteer_DocumentLoaders implements INode {
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
for (const page of selectedLinks.slice(0, limit)) {
docs.push(...(await puppeteerLoader(page)))
}
} else {

View File

@ -51,11 +51,13 @@ class Text_DocumentLoaders implements INode {
{
label: 'Document',
name: 'document',
baseClasses: this.baseClasses
description: 'Array of document objects containing metadata and pageContent',
baseClasses: [...this.baseClasses, 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -51,11 +51,13 @@ class VectorStoreToDocument_DocumentLoaders implements INode {
{
label: 'Document',
name: 'document',
description: 'Array of document objects containing metadata and pageContent',
baseClasses: [...this.baseClasses, 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -28,12 +28,12 @@ class QueryEngine_LlamaIndex implements INode {
constructor(fields?: { sessionId?: string }) {
this.label = 'Query Engine'
this.name = 'queryEngine'
this.version = 1.0
this.version = 2.0
this.type = 'QueryEngine'
this.icon = 'query-engine.png'
this.category = 'Engine'
this.description = 'Simple query engine built to answer question over your data, without memory'
this.baseClasses = [this.type]
this.baseClasses = [this.type, 'BaseQueryEngine']
this.tags = ['LlamaIndex']
this.inputs = [
{
@ -59,52 +59,13 @@ class QueryEngine_LlamaIndex implements INode {
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
async init(nodeData: INodeData): Promise<any> {
return prepareEngine(nodeData)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever
const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer
let queryEngine = new RetrieverQueryEngine(vectorStoreRetriever)
if (responseSynthesizerObj) {
if (responseSynthesizerObj.type === 'TreeSummarize') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'CompactAndRefine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new CompactAndRefine(
vectorStoreRetriever.serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'Refine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new Refine(
vectorStoreRetriever.serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
}
}
const queryEngine = prepareEngine(nodeData)
let text = ''
let sourceDocuments: ICommonObject[] = []
@ -140,4 +101,49 @@ class QueryEngine_LlamaIndex implements INode {
}
}
const prepareEngine = (nodeData: INodeData) => {
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever
const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer
let queryEngine = new RetrieverQueryEngine(vectorStoreRetriever)
if (responseSynthesizerObj) {
if (responseSynthesizerObj.type === 'TreeSummarize') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'CompactAndRefine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new CompactAndRefine(
vectorStoreRetriever.serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'Refine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new Refine(
vectorStoreRetriever.serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext),
serviceContext: vectorStoreRetriever.serviceContext
})
queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
}
}
return queryEngine
}
module.exports = { nodeClass: QueryEngine_LlamaIndex }

View File

@ -33,13 +33,13 @@ class SubQuestionQueryEngine_LlamaIndex implements INode {
constructor(fields?: { sessionId?: string }) {
this.label = 'Sub Question Query Engine'
this.name = 'subQuestionQueryEngine'
this.version = 1.0
this.version = 2.0
this.type = 'SubQuestionQueryEngine'
this.icon = 'subQueryEngine.svg'
this.category = 'Engine'
this.description =
'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate responses and synthesizes a final response'
this.baseClasses = [this.type]
this.baseClasses = [this.type, 'BaseQueryEngine']
this.tags = ['LlamaIndex']
this.inputs = [
{
@ -76,85 +76,13 @@ class SubQuestionQueryEngine_LlamaIndex implements INode {
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
async init(nodeData: INodeData): Promise<any> {
return prepareEngine(nodeData)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
const model = nodeData.inputs?.model
const serviceContext = serviceContextFromDefaults({
llm: model,
embedModel: embeddings
})
let queryEngineTools = nodeData.inputs?.queryEngineTools as QueryEngineTool[]
queryEngineTools = flatten(queryEngineTools)
let queryEngine = SubQuestionQueryEngine.fromDefaults({
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer
if (responseSynthesizerObj) {
if (responseSynthesizerObj.type === 'TreeSummarize') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new TreeSummarize(serviceContext, responseSynthesizerObj.textQAPromptTemplate),
serviceContext
})
queryEngine = SubQuestionQueryEngine.fromDefaults({
responseSynthesizer,
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
} else if (responseSynthesizerObj.type === 'CompactAndRefine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new CompactAndRefine(
serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext
})
queryEngine = SubQuestionQueryEngine.fromDefaults({
responseSynthesizer,
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
} else if (responseSynthesizerObj.type === 'Refine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new Refine(
serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext
})
queryEngine = SubQuestionQueryEngine.fromDefaults({
responseSynthesizer,
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
} else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new SimpleResponseBuilder(serviceContext),
serviceContext
})
queryEngine = SubQuestionQueryEngine.fromDefaults({
responseSynthesizer,
serviceContext,
queryEngineTools,
questionGen: new LLMQuestionGenerator({ llm: model })
})
}
}
const queryEngine = prepareEngine(nodeData)
let text = ''
let sourceDocuments: ICommonObject[] = []
@ -190,4 +118,82 @@ class SubQuestionQueryEngine_LlamaIndex implements INode {
}
}
/**
 * Builds a LlamaIndex SubQuestionQueryEngine from the node's inputs.
 * A shared service context is derived from the selected model and embeddings;
 * when a response synthesizer config is connected, the engine is rebuilt with
 * the matching response builder, otherwise the default engine is returned.
 */
const prepareEngine = (nodeData: INodeData) => {
    const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
    const model = nodeData.inputs?.model

    // Service context shared by the engine and every response builder below.
    const serviceContext = serviceContextFromDefaults({
        llm: model,
        embedModel: embeddings
    })

    const queryEngineTools: QueryEngineTool[] = flatten(nodeData.inputs?.queryEngineTools as QueryEngineTool[])

    // Helper: assemble a SubQuestionQueryEngine, optionally with a custom synthesizer.
    const buildEngine = (responseSynthesizer?: ResponseSynthesizer) =>
        SubQuestionQueryEngine.fromDefaults({
            ...(responseSynthesizer ? { responseSynthesizer } : {}),
            serviceContext,
            queryEngineTools,
            questionGen: new LLMQuestionGenerator({ llm: model })
        })

    let queryEngine = buildEngine()

    const synthesizerConfig = nodeData.inputs?.responseSynthesizer
    if (synthesizerConfig) {
        // Map the configured synthesizer type onto its LlamaIndex response builder.
        let responseBuilder
        switch (synthesizerConfig.type) {
            case 'TreeSummarize':
                responseBuilder = new TreeSummarize(serviceContext, synthesizerConfig.textQAPromptTemplate)
                break
            case 'CompactAndRefine':
                responseBuilder = new CompactAndRefine(
                    serviceContext,
                    synthesizerConfig.textQAPromptTemplate,
                    synthesizerConfig.refinePromptTemplate
                )
                break
            case 'Refine':
                responseBuilder = new Refine(
                    serviceContext,
                    synthesizerConfig.textQAPromptTemplate,
                    synthesizerConfig.refinePromptTemplate
                )
                break
            case 'SimpleResponseBuilder':
                responseBuilder = new SimpleResponseBuilder(serviceContext)
                break
        }

        // Unrecognized types fall through and keep the default engine (matches prior behavior).
        if (responseBuilder) {
            queryEngine = buildEngine(new ResponseSynthesizer({ responseBuilder, serviceContext }))
        }
    }

    return queryEngine
}
module.exports = { nodeClass: SubQuestionQueryEngine_LlamaIndex }

View File

@ -117,7 +117,10 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P
memoryKey: memoryKey ?? 'chat_history',
chatHistory: dynamoDb,
sessionId,
dynamodbClient: client
dynamodbClient: client,
tableName,
partitionKey,
dynamoKey: { [partitionKey]: { S: sessionId } }
})
return memory
}
@ -125,6 +128,9 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P
interface BufferMemoryExtendedInput {
dynamodbClient: DynamoDBClient
sessionId: string
tableName: string
partitionKey: string
dynamoKey: Record<string, AttributeValue>
}
interface DynamoDBSerializedChatMessage {
@ -142,6 +148,10 @@ interface DynamoDBSerializedChatMessage {
}
class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
private tableName = ''
private partitionKey = ''
private dynamoKey: Record<string, AttributeValue>
private messageAttributeName: string
sessionId = ''
dynamodbClient: DynamoDBClient
@ -149,11 +159,14 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
super(fields)
this.sessionId = fields.sessionId
this.dynamodbClient = fields.dynamodbClient
this.tableName = fields.tableName
this.partitionKey = fields.partitionKey
this.dynamoKey = fields.dynamoKey
}
overrideDynamoKey(overrideSessionId = '') {
const existingDynamoKey = (this as any).dynamoKey
const partitionKey = (this as any).partitionKey
const existingDynamoKey = this.dynamoKey
const partitionKey = this.partitionKey
let newDynamoKey: Record<string, AttributeValue> = {}
@ -209,9 +222,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.dynamodbClient) return []
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
const tableName = (this as any).tableName
const messageAttributeName = (this as any).messageAttributeName
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey
const tableName = this.tableName
const messageAttributeName = this.messageAttributeName
const params: GetItemCommandInput = {
TableName: tableName,
@ -236,9 +249,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.dynamodbClient) return
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
const tableName = (this as any).tableName
const messageAttributeName = (this as any).messageAttributeName
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey
const tableName = this.tableName
const messageAttributeName = this.messageAttributeName
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
@ -259,8 +272,8 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async clearChatMessages(overrideSessionId = ''): Promise<void> {
if (!this.dynamodbClient) return
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
const tableName = (this as any).tableName
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey
const tableName = this.tableName
const params: DeleteItemCommandInput = {
TableName: tableName,

View File

@ -154,7 +154,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.collection) return []
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const document = await this.collection.findOne({ sessionId: id })
const messages = document?.messages || []
const baseMessages = messages.map(mapStoredMessageToChatMessage)
@ -164,7 +164,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.collection) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
@ -196,7 +196,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async clearChatMessages(overrideSessionId = ''): Promise<void> {
if (!this.collection) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.collection.deleteOne({ sessionId: id })
await this.clear()
}

View File

@ -141,7 +141,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods {
}
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
try {
const resp = await this.caller.call(fetch, `${this.url}/sessions/${id}/memory`, {
//@ts-ignore
@ -172,7 +172,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods {
}
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
const inputValues = { [this.inputKey ?? 'input']: input?.text }
@ -182,7 +182,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods {
}
async clearChatMessages(overrideSessionId = ''): Promise<void> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.clear(id)
}
}

View File

@ -189,7 +189,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.redisClient) return []
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const rawStoredMessages = await this.redisClient.lrange(id, this.windowSize ? this.windowSize * -1 : 0, -1)
const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message))
const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage)
@ -199,7 +199,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.redisClient) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
@ -219,7 +219,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async clearChatMessages(overrideSessionId = ''): Promise<void> {
if (!this.redisClient) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.redisClient.del(id)
await this.clear()
}

View File

@ -114,7 +114,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.redisClient) return []
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const rawStoredMessages: StoredMessage[] = await this.redisClient.lrange<StoredMessage>(id, 0, -1)
const orderedMessages = rawStoredMessages.reverse()
const previousMessages = orderedMessages.filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined)
@ -125,7 +125,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.redisClient) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
@ -145,7 +145,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
async clearChatMessages(overrideSessionId = ''): Promise<void> {
if (!this.redisClient) return
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.redisClient.del(id)
await this.clear()
}

View File

@ -162,14 +162,14 @@ class ZepMemoryExtended extends ZepMemory implements MemoryMethods {
}
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const memoryVariables = await this.loadMemoryVariables({}, id)
const baseMessages = memoryVariables[this.memoryKey]
return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
}
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
const inputValues = { [this.inputKey ?? 'input']: input?.text }
@ -179,7 +179,7 @@ class ZepMemoryExtended extends ZepMemory implements MemoryMethods {
}
async clearChatMessages(overrideSessionId = ''): Promise<void> {
const id = overrideSessionId ?? this.sessionId
const id = overrideSessionId ? overrideSessionId : this.sessionId
await this.clear(id)
}
}

View File

@ -0,0 +1,79 @@
import { getBaseClasses, INode, INodeData, INodeParams } from '../../../src'
import { BaseOutputParser } from 'langchain/schema/output_parser'
import { StructuredOutputParser as LangchainStructuredOutputParser } from 'langchain/output_parsers'
import { CATEGORY } from '../OutputParserHelpers'
import { z } from 'zod'
/**
 * Flowise node that parses LLM output into a structure described by a
 * user-supplied Zod schema string, optionally wrapping the parser with
 * auto-fix behavior (a follow-up model call on parse failure).
 */
class AdvancedStructuredOutputParser implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams

    constructor() {
        this.label = 'Advanced Structured Output Parser'
        this.name = 'advancedStructuredOutputParser'
        this.version = 1.0
        this.type = 'AdvancedStructuredOutputParser'
        this.description = 'Parse the output of an LLM call into a given structure by providing a Zod schema.'
        this.icon = 'structure.svg'
        this.category = CATEGORY
        this.baseClasses = [this.type, ...getBaseClasses(BaseOutputParser)]
        this.inputs = [
            {
                label: 'Autofix',
                name: 'autofixParser',
                type: 'boolean',
                optional: true,
                description: 'In the event that the first call fails, will make another call to the model to fix any errors.'
            },
            {
                label: 'Example JSON',
                name: 'exampleJson',
                type: 'string',
                description: 'Zod schema for the output of the model',
                rows: 10,
                default: `z.object({
    title: z.string(), // Title of the movie as a string
    yearOfRelease: z.number().int(), // Release year as an integer number,
    genres: z.enum([
        "Action", "Comedy", "Drama", "Fantasy", "Horror",
        "Mystery", "Romance", "Science Fiction", "Thriller", "Documentary"
    ]).array().max(2), // Array of genres, max of 2 from the defined enum
    shortDescription: z.string().max(500) // Short description, max 500 characters
})`
            }
        ]
    }

    /**
     * Evaluates the user-provided Zod schema string and returns a LangChain
     * StructuredOutputParser for it, tagged with the autoFix flag.
     *
     * @param nodeData - node inputs; `exampleJson` is the schema source, `autofixParser` the autofix toggle
     * @throws Error with a descriptive message when the schema string cannot be evaluated or parsed
     */
    async init(nodeData: INodeData): Promise<any> {
        const schemaString = nodeData.inputs?.exampleJson as string
        const autoFix = nodeData.inputs?.autofixParser as boolean

        try {
            // SECURITY NOTE: `new Function` executes arbitrary JavaScript from user input.
            // Acceptable only because node configuration is authored by trusted flow builders.
            // Evaluation happens INSIDE the try so an invalid schema string surfaces as the
            // friendly "Error parsing Zod Schema" error rather than a raw SyntaxError.
            const zodSchemaFunction = new Function('z', `return ${schemaString}`)
            const zodSchema = zodSchemaFunction(z)

            const structuredOutputParser = LangchainStructuredOutputParser.fromZodSchema(zodSchema)

            // NOTE: When we change Flowise to return a json response, the following has to be changed to: JsonStructuredOutputParser
            Object.defineProperty(structuredOutputParser, 'autoFix', {
                enumerable: true,
                configurable: true,
                writable: true,
                value: autoFix
            })
            return structuredOutputParser
        } catch (exception) {
            throw new Error('Error parsing Zod Schema: ' + exception)
        }
    }
}
module.exports = { nodeClass: AdvancedStructuredOutputParser }

View File

@ -0,0 +1,8 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M16 3V13M16 3L13 6.13609M16 3L19 6.13609" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M7 15V12C7 10.8954 7.89543 10 9 10H11M25 15V12C25 10.8954 24.1046 10 23 10H21" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M12.5644 20.4399C11.6769 19.7608 9 19.6332 9 21.7961C9 24.1915 13 22.5657 13 25.0902C13 26.9875 10.33 27.5912 9 26.3537" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M24 27V20L28 27V20" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M16 23.5C16 20.7 17.6667 20 18.5 20C19.3333 20 21 20.7 21 23.5C21 26.3 19.3333 27 18.5 27C17.6667 27 16 26.3 16 23.5Z" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6 20V25C6 26.1046 5.10457 27 4 27V27C2.89543 27 2 26.1046 2 25V25" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

After

Width:  |  Height:  |  Size: 1.1 KiB

View File

@ -94,11 +94,13 @@ class CohereRerankRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -78,11 +78,13 @@ class EmbeddingsFilterRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -140,11 +140,13 @@ Passage:`
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -58,11 +58,13 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -89,11 +89,13 @@ class RRFRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -74,11 +74,13 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
description: 'Array of document objects containing metadata and pageContent',
baseClasses: ['Document', 'json']
},
{
label: 'Text',
name: 'text',
description: 'Concatenated string from pageContent of documents',
baseClasses: ['string', 'json']
}
]

View File

@ -100,6 +100,7 @@ export class DynamicStructuredTool<
return result
}
// @ts-ignore
protected async _call(
arg: z.output<T>,
_?: CallbackManagerForToolRun,

View File

@ -1,5 +1,5 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { VectorStoreIndex } from 'llamaindex'
import { BaseQueryEngine } from 'llamaindex'
class QueryEngine_Tools implements INode {
label: string
@ -16,7 +16,7 @@ class QueryEngine_Tools implements INode {
constructor() {
this.label = 'QueryEngine Tool'
this.name = 'queryEngineToolLlamaIndex'
this.version = 1.0
this.version = 2.0
this.type = 'QueryEngineTool'
this.icon = 'queryEngineTool.svg'
this.category = 'Tools'
@ -25,9 +25,9 @@ class QueryEngine_Tools implements INode {
this.baseClasses = [this.type]
this.inputs = [
{
label: 'Vector Store Index',
name: 'vectorStoreIndex',
type: 'VectorStoreIndex'
label: 'Base QueryEngine',
name: 'baseQueryEngine',
type: 'BaseQueryEngine'
},
{
label: 'Tool Name',
@ -45,20 +45,15 @@ class QueryEngine_Tools implements INode {
}
async init(nodeData: INodeData): Promise<any> {
const vectorStoreIndex = nodeData.inputs?.vectorStoreIndex as VectorStoreIndex
const baseQueryEngine = nodeData.inputs?.baseQueryEngine as BaseQueryEngine
const toolName = nodeData.inputs?.toolName as string
const toolDesc = nodeData.inputs?.toolDesc as string
const queryEngineTool = {
queryEngine: vectorStoreIndex.asQueryEngine({
preFilters: {
...(vectorStoreIndex as any).metadatafilter
}
}),
queryEngine: baseQueryEngine,
metadata: {
name: toolName,
description: toolDesc
},
vectorStoreIndex
}
}
return queryEngineTool

View File

@ -24,7 +24,7 @@ class Postgres_VectorStores implements INode {
constructor() {
this.label = 'Postgres'
this.name = 'postgres'
this.version = 2.0
this.version = 3.0
this.type = 'Postgres'
this.icon = 'postgres.svg'
this.category = 'Vector Stores'
@ -60,13 +60,6 @@ class Postgres_VectorStores implements INode {
name: 'database',
type: 'string'
},
{
label: 'SSL Connection',
name: 'sslConnection',
type: 'boolean',
default: false,
optional: false
},
{
label: 'Port',
name: 'port',
@ -124,7 +117,6 @@ class Postgres_VectorStores implements INode {
const docs = nodeData.inputs?.document as Document[]
const embeddings = nodeData.inputs?.embeddings as Embeddings
const additionalConfig = nodeData.inputs?.additionalConfig as string
const sslConnection = nodeData.inputs?.sslConnection as boolean
let additionalConfiguration = {}
if (additionalConfig) {
@ -142,8 +134,7 @@ class Postgres_VectorStores implements INode {
port: nodeData.inputs?.port as number,
username: user,
password: password,
database: nodeData.inputs?.database as string,
ssl: sslConnection
database: nodeData.inputs?.database as string
}
const args = {
@ -198,7 +189,8 @@ class Postgres_VectorStores implements INode {
type: 'postgres',
host: nodeData.inputs?.host as string,
port: nodeData.inputs?.port as number,
username: user,
username: user, // Required by TypeORMVectorStore
user: user, // Required by Pool in similaritySearchVectorWithScore
password: password,
database: nodeData.inputs?.database as string
}
@ -248,14 +240,7 @@ const similaritySearchVectorWithScore = async (
ORDER BY "_distance" ASC
LIMIT $3;`
const poolOptions = {
host: postgresConnectionOptions.host,
port: postgresConnectionOptions.port,
user: postgresConnectionOptions.username,
password: postgresConnectionOptions.password,
database: postgresConnectionOptions.database
}
const pool = new Pool(poolOptions)
const pool = new Pool(postgresConnectionOptions)
const conn = await pool.connect()
const documents = await conn.query(queryString, [embeddingString, _filter, k])

View File

@ -20,6 +20,7 @@ PORT=3000
# FLOWISE_USERNAME=user
# FLOWISE_PASSWORD=1234
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
# FLOWISE_FILE_SIZE_LIMIT=50mb
# DEBUG=true
# LOG_LEVEL=debug (error | warn | info | verbose | debug)
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs

View File

@ -1,5 +1,7 @@
{
"description": "Use OpenAI Function Agent and Chain to automatically decide which API to call, generating url and body request from conversation",
"categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,OpenAI Function Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -123,6 +125,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -143,6 +149,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -462,6 +472,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -482,6 +496,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Given API docs, agent automatically decide which API to call, generating url and body request from conversation",
"categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,Conversational Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -431,6 +433,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -451,6 +457,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -622,6 +632,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -642,6 +656,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -813,6 +831,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -833,6 +855,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -0,0 +1,468 @@
{
"description": "Return response as a JSON structure as specified by a Zod schema",
"badge": "NEW",
"nodes": [
{
"width": 300,
"height": 508,
"id": "llmChain_0",
"position": {
"x": 1229.1699649849293,
"y": 245.55173505632646
},
"type": "customNode",
"data": {
"id": "llmChain_0",
"label": "LLM Chain",
"version": 3,
"name": "llmChain",
"type": "LLMChain",
"baseClasses": ["LLMChain", "BaseChain", "Runnable"],
"category": "Chains",
"description": "Chain to run queries against LLMs",
"inputParams": [
{
"label": "Chain Name",
"name": "chainName",
"type": "string",
"placeholder": "Name Your Chain",
"optional": true,
"id": "llmChain_0-input-chainName-string"
}
],
"inputAnchors": [
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "llmChain_0-input-model-BaseLanguageModel"
},
{
"label": "Prompt",
"name": "prompt",
"type": "BasePromptTemplate",
"id": "llmChain_0-input-prompt-BasePromptTemplate"
},
{
"label": "Output Parser",
"name": "outputParser",
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOpenAI_0.data.instance}}",
"prompt": "{{chatPromptTemplate_0.data.instance}}",
"outputParser": "{{advancedStructuredOutputParser_0.data.instance}}",
"chainName": "",
"inputModeration": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable",
"name": "llmChain",
"label": "LLM Chain",
"type": "LLMChain | BaseChain | Runnable"
},
{
"id": "llmChain_0-output-outputPrediction-string|json",
"name": "outputPrediction",
"label": "Output Prediction",
"type": "string | json"
}
],
"default": "llmChain"
}
],
"outputs": {
"output": "llmChain"
},
"selected": false
},
"positionAbsolute": {
"x": 1229.1699649849293,
"y": 245.55173505632646
},
"selected": false
},
{
"width": 300,
"height": 690,
"id": "chatPromptTemplate_0",
"position": {
"x": 493.26582927222483,
"y": -156.20470841335592
},
"type": "customNode",
"data": {
"id": "chatPromptTemplate_0",
"label": "Chat Prompt Template",
"version": 1,
"name": "chatPromptTemplate",
"type": "ChatPromptTemplate",
"baseClasses": ["ChatPromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "Runnable"],
"category": "Prompts",
"description": "Schema to represent a chat prompt",
"inputParams": [
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"placeholder": "You are a helpful assistant that translates {input_language} to {output_language}.",
"id": "chatPromptTemplate_0-input-systemMessagePrompt-string"
},
{
"label": "Human Message",
"name": "humanMessagePrompt",
"type": "string",
"rows": 4,
"placeholder": "{text}",
"id": "chatPromptTemplate_0-input-humanMessagePrompt-string"
},
{
"label": "Format Prompt Values",
"name": "promptValues",
"type": "json",
"optional": true,
"acceptVariable": true,
"list": true,
"id": "chatPromptTemplate_0-input-promptValues-json"
}
],
"inputAnchors": [],
"inputs": {
"systemMessagePrompt": "This AI is designed to only output information in JSON format without exception. This AI can only output JSON and will never output any other text.\n\nWhen asked to correct itself, this AI will only output the corrected JSON and never any other text.",
"humanMessagePrompt": "{text}",
"promptValues": ""
},
"outputAnchors": [
{
"id": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable",
"name": "chatPromptTemplate",
"label": "ChatPromptTemplate",
"type": "ChatPromptTemplate | BaseChatPromptTemplate | BasePromptTemplate | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 493.26582927222483,
"y": -156.20470841335592
},
"dragging": false
},
{
"width": 300,
"height": 576,
"id": "chatOpenAI_0",
"position": {
"x": 860.555928011636,
"y": -355.71028569475095
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-4-32k-0613",
"name": "gpt-4-32k-0613"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
},
{
"label": "gpt-3.5-turbo-16k-0613",
"name": "gpt-3.5-turbo-16k-0613"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-basepath-string"
},
{
"label": "BaseOptions",
"name": "baseOptions",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"modelName": "",
"temperature": "0",
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 860.555928011636,
"y": -355.71028569475095
},
"dragging": false
},
{
"width": 300,
"height": 454,
"id": "advancedStructuredOutputParser_0",
"position": {
"x": 489.3637511211284,
"y": 580.0628053662244
},
"type": "customNode",
"data": {
"id": "advancedStructuredOutputParser_0",
"label": "Advanced Structured Output Parser",
"version": 1,
"name": "advancedStructuredOutputParser",
"type": "AdvancedStructuredOutputParser",
"baseClasses": ["AdvancedStructuredOutputParser", "BaseLLMOutputParser", "Runnable"],
"category": "Output Parsers",
"description": "Parse the output of an LLM call into a given structure by providing a Zod schema.",
"inputParams": [
{
"label": "Autofix",
"name": "autofixParser",
"type": "boolean",
"optional": true,
"description": "In the event that the first call fails, will make another call to the model to fix any errors.",
"id": "advancedStructuredOutputParser_0-input-autofixParser-boolean"
},
{
"label": "Example JSON",
"name": "exampleJson",
"type": "string",
"description": "Zod schema for the output of the model",
"rows": 10,
"default": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})",
"id": "advancedStructuredOutputParser_0-input-exampleJson-string"
}
],
"inputAnchors": [],
"inputs": {
"autofixParser": "",
"exampleJson": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})"
},
"outputAnchors": [
{
"id": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable",
"name": "advancedStructuredOutputParser",
"label": "AdvancedStructuredOutputParser",
"type": "AdvancedStructuredOutputParser | BaseLLMOutputParser | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 489.3637511211284,
"y": 580.0628053662244
}
}
],
"edges": [
{
"source": "chatPromptTemplate_0",
"sourceHandle": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-prompt-BasePromptTemplate",
"type": "buttonedge",
"id": "chatPromptTemplate_0-chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel"
},
{
"source": "advancedStructuredOutputParser_0",
"sourceHandle": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser",
"type": "buttonedge",
"id": "advancedStructuredOutputParser_0-advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser"
}
]
}

View File

@ -1,5 +1,7 @@
{
"description": "Output antonym of given user input using few-shot prompt template built with examples",
"categories": "Few Shot Prompt,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -210,6 +212,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -230,6 +236,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Use AutoGPT - Autonomous agent with chain of thoughts for self-guided task completion",
"categories": "AutoGPT,SERP Tool,File Read/Write,ChatOpenAI,Pinecone,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -286,6 +288,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -306,6 +312,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Use BabyAGI to create tasks and reprioritize for a given objective",
"categories": "BabyAGI,ChatOpenAI,Pinecone,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -379,6 +381,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -399,6 +405,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Analyse and summarize CSV data",
"categories": "CSV Agent,ChatOpenAI,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -104,6 +106,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -124,6 +130,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Engage with data sources such as YouTube Transcripts, Google, and more through intelligent Q&A interactions",
"categories": "Memory Vector Store,SearchAPI,ChatOpenAI,Conversational Retrieval QA Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -229,6 +231,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -249,6 +255,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Use ChatGPT Plugins within LangChain abstractions with GET and POST Tools",
"categories": "ChatGPT Plugin,HTTP GET/POST,ChatOpenAI,MRKL Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -249,6 +251,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -269,6 +275,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Use Anthropic Claude with 200k context window to ingest whole document for QnA",
"categories": "Buffer Memory,Prompt Template,Conversation Chain,ChatAnthropic,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -439,10 +441,10 @@
"type": "options",
"options": [
{
"id": "plainText_0-output-document-Document",
"id": "plainText_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "plainText_0-output-text-string|json",

View File

@ -1,5 +1,7 @@
{
"description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex",
"categories": "Text File,Prompt Template,ChatOpenAI,Conversation Chain,Pinecone,LlamaIndex,Redis",
"framework": "LlamaIndex",
"badge": "NEW",
"nodes": [
{
@ -57,10 +59,10 @@
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document",
"id": "textFile_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "textFile_0-output-text-string|json",
@ -849,11 +851,11 @@
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document",
"id": "textFile_0-textFile_0-output-document-Document|json-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document",
"data": {
"label": ""
}

View File

@ -1,5 +1,7 @@
{
"description": "A conversational agent for a chat model which utilize chat specific prompts",
"categories": "Calculator Tool,Buffer Memory,SerpAPI,ChatOpenAI,Conversational Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -191,6 +193,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -211,6 +217,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,6 +1,8 @@
{
"description": "Agent optimized for vector retrieval during conversation and answering questions based on previous dialogue.",
"categories": "Retriever Tool,Buffer Memory,ChatOpenAI,Conversational Retrieval Agent, Pinecone,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -522,6 +524,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -542,6 +548,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,6 +1,8 @@
{
"description": "Text file QnA using conversational retrieval QA chain",
"categories": "TextFile,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -233,10 +235,10 @@
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document",
"id": "textFile_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "textFile_0-output-text-string|json",
@ -404,6 +406,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -424,6 +430,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -730,11 +740,11 @@
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "pinecone_0",
"targetHandle": "pinecone_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document-pinecone_0-pinecone_0-input-document-Document",
"id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document",
"data": {
"label": ""
}

View File

@ -1,6 +1,8 @@
{
"description": "Flowise Docs Github QnA using conversational retrieval QA chain",
"categories": "Memory Vector Store,Github Loader,ChatOpenAI,Conversational Retrieval QA Chain,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -410,6 +412,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -430,6 +436,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Simple LLM Chain using HuggingFace Inference API on falcon-7b-instruct model",
"categories": "HuggingFace,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Split flows based on if else condition",
"categories": "IfElse Function,ChatOpenAI,OpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"badge": "new",
"nodes": [
{
@ -943,6 +945,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -963,6 +969,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,6 +1,8 @@
{
"description": "Generate image using Replicate Stability text-to-image generative AI model",
"badge": "NEW",
"categories": "Replicate,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -487,6 +489,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -507,6 +513,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,6 +1,8 @@
{
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"badge": "NEW",
"categories": "Moderation,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -199,6 +201,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -219,6 +225,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,6 +1,8 @@
{
"description": "Return response as a list (array) instead of a string/text",
"badge": "NEW",
"categories": "CSV Output Parser,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -258,6 +260,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -278,6 +284,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,6 +1,8 @@
{
"description": "QnA chain using Ollama local LLM, LocalAI embedding model, and Faiss local vector store",
"badge": "POPULAR",
"categories": "Text File,ChatOllama,Conversational Retrieval QA Chain,Faiss,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -224,10 +226,10 @@
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document",
"id": "textFile_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "textFile_0-output-text-string|json",
@ -649,11 +651,11 @@
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "faiss_0",
"targetHandle": "faiss_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document-faiss_0-faiss_0-input-document-Document",
"id": "textFile_0-textFile_0-output-document-Document|json-faiss_0-faiss_0-input-document-Document",
"data": {
"label": ""
}

View File

@ -1,5 +1,7 @@
{
"description": "Use long term memory like Zep to differentiate conversations between users with sessionId",
"categories": "ChatOpenAI,Conversational Retrieval QA Chain,Zep Memory,Qdrant,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -541,6 +543,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -561,6 +567,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,6 +1,8 @@
{
"description": "Upsert multiple files with metadata and filter by it using conversational retrieval QA chain",
"categories": "Text File,PDF File,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -126,10 +128,10 @@
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document",
"id": "textFile_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "textFile_0-output-text-string|json",
@ -488,6 +490,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -508,6 +514,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -836,11 +846,11 @@
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document",
"sourceHandle": "textFile_0-output-document-Document|json",
"target": "pinecone_0",
"targetHandle": "pinecone_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document-pinecone_0-pinecone_0-input-document-Document",
"id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document",
"data": {
"label": ""
}

View File

@ -1,5 +1,7 @@
{
"description": "A chain that automatically picks an appropriate prompt from multiple prompts",
"categories": "ChatOpenAI,Multi Prompt Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -312,6 +314,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -332,6 +338,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "A chain that automatically picks an appropriate retriever from multiple different vector databases",
"categories": "ChatOpenAI,Multi Retrieval QA Chain,Pinecone,Chroma,Supabase,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -423,6 +425,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -443,6 +449,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Use the agent to choose between multiple different vector databases, with the ability to use other tools",
"categories": "Buffer Memory,ChatOpenAI,Chain Tool,Retrieval QA Chain,Redis,Faiss,Conversational Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -520,6 +522,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -540,6 +546,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -964,10 +974,10 @@
"type": "options",
"options": [
{
"id": "plainText_0-output-document-Document",
"id": "plainText_0-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "plainText_0-output-text-string|json",
@ -1038,6 +1048,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -1058,6 +1072,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -1236,6 +1254,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -1256,6 +1278,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -1501,10 +1527,10 @@
"type": "options",
"options": [
{
"id": "plainText_1-output-document-Document",
"id": "plainText_1-output-document-Document|json",
"name": "document",
"label": "Document",
"type": "Document"
"type": "Document | json"
},
{
"id": "plainText_1-output-text-string|json",
@ -1721,11 +1747,11 @@
},
{
"source": "plainText_0",
"sourceHandle": "plainText_0-output-document-Document",
"sourceHandle": "plainText_0-output-document-Document|json",
"target": "redis_0",
"targetHandle": "redis_0-input-document-Document",
"type": "buttonedge",
"id": "plainText_0-plainText_0-output-document-Document-redis_0-redis_0-input-document-Document",
"id": "plainText_0-plainText_0-output-document-Document|json-redis_0-redis_0-input-document-Document",
"data": {
"label": ""
}
@ -1776,11 +1802,11 @@
},
{
"source": "plainText_1",
"sourceHandle": "plainText_1-output-document-Document",
"sourceHandle": "plainText_1-output-document-Document|json",
"target": "faiss_0",
"targetHandle": "faiss_0-input-document-Document",
"type": "buttonedge",
"id": "plainText_1-plainText_1-output-document-Document-faiss_0-faiss_0-input-document-Document",
"id": "plainText_1-plainText_1-output-document-Document|json-faiss_0-faiss_0-input-document-Document",
"data": {
"label": ""
}

View File

@ -1,5 +1,7 @@
{
"description": "An agent that uses OpenAI's Function Calling functionality to pick the tool and args to call",
"categories": "Buffer Memory,Custom Tool, SerpAPI,OpenAI Function,Calculator Tool,ChatOpenAI,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -314,6 +316,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -334,6 +340,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "OpenAI Assistant that has instructions and can leverage models, tools, and knowledge to respond to user queries",
"categories": "Custom Tool, SerpAPI,OpenAI Assistant,Calculator Tool,Langchain",
"framework": "Langchain",
"badge": "NEW",
"nodes": [
{

View File

@ -1,6 +1,8 @@
{
"description": "Use chat history to rephrase user question, and answer the rephrased question using retrieved docs from vector store",
"categories": "ChatOpenAI,LLM Chain,SingleStore,Langchain",
"badge": "POPULAR",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -462,6 +464,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -482,6 +488,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -660,6 +670,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -680,6 +694,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Use output from a chain as prompt for another chain",
"categories": "Custom Tool,OpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,6 +1,8 @@
{
"description": "Stateless query engine designed to answer question over your data using LlamaIndex",
"categories": "ChatAnthropic,Compact and Refine,Pinecone,LlamaIndex",
"badge": "NEW",
"framework": "LlamaIndex",
"nodes": [
{
"width": 300,
@ -14,10 +16,10 @@
"data": {
"id": "queryEngine_0",
"label": "Query Engine",
"version": 1,
"version": 2,
"name": "queryEngine",
"type": "QueryEngine",
"baseClasses": ["QueryEngine"],
"baseClasses": ["QueryEngine", "BaseQueryEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Simple query engine built to answer question over your data, without memory",
@ -53,10 +55,10 @@
},
"outputAnchors": [
{
"id": "queryEngine_0-output-queryEngine-QueryEngine",
"id": "queryEngine_0-output-queryEngine-QueryEngine|BaseQueryEngine",
"name": "queryEngine",
"label": "QueryEngine",
"type": "QueryEngine"
"type": "QueryEngine | BaseQueryEngine"
}
],
"outputs": {},

View File

@ -1,5 +1,7 @@
{
"description": "An agent that uses ReAct logic to decide what action to take",
"categories": "Calculator Tool,SerpAPI,ChatOpenAI,MRKL Agent,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -134,6 +136,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -154,6 +160,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Use Replicate API that runs Llama 13b v2 model with LLMChain",
"categories": "Replicate,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Answer questions over a SQL database",
"categories": "ChatOpenAI,Sql Database Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -48,6 +50,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -68,6 +74,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Manually construct prompts to query a SQL database",
"categories": "IfElse Function,Variable Set/Get,Custom JS Function,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"badge": "new",
"nodes": [
{
@ -208,6 +210,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -228,6 +234,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -406,6 +416,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -426,6 +440,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -1330,6 +1348,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -1350,6 +1372,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Simple chat engine to handle back and forth conversations using LlamaIndex",
"categories": "BufferMemory,AzureChatOpenAI,LlamaIndex",
"framework": "LlamaIndex",
"badge": "NEW",
"nodes": [
{

View File

@ -1,5 +1,7 @@
{
"description": "Basic example of Conversation Chain with built-in memory - works exactly like ChatGPT",
"categories": "Buffer Memory,ChatOpenAI,Conversation Chain,Langchain",
"framework": "Langchain",
"badge": "POPULAR",
"nodes": [
{
@ -49,6 +51,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -69,6 +75,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Basic example of stateless (no memory) LLM Chain with a Prompt Template and LLM Model",
"categories": "OpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Return response as a specified JSON structure instead of a string/text",
"categories": "Structured Output Parser,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"badge": "NEW",
"nodes": [
{
@ -49,6 +51,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -69,6 +75,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Breaks down query into sub questions for each relevant data source, then combine into final response",
"categories": "Sub Question Query Engine,Sticky Note,QueryEngine Tool,Compact and Refine,ChatOpenAI,Pinecone,LlamaIndex",
"framework": "LlamaIndex",
"badge": "NEW",
"nodes": [
{
@ -7,8 +9,8 @@
"height": 749,
"id": "compactrefineLlamaIndex_0",
"position": {
"x": -1214.7329938486841,
"y": 56.52482754447425
"x": -443.9012456561584,
"y": 826.6100190232154
},
"type": "customNode",
"data": {
@ -61,8 +63,8 @@
},
"selected": false,
"positionAbsolute": {
"x": -1214.7329938486841,
"y": 56.52482754447425
"x": -443.9012456561584,
"y": 826.6100190232154
},
"dragging": false
},
@ -71,8 +73,8 @@
"height": 611,
"id": "pineconeLlamaIndex_0",
"position": {
"x": 37.23548045607484,
"y": -119.7364648743818
"x": 35.45798119088212,
"y": -132.1789597307308
},
"type": "customNode",
"data": {
@ -181,14 +183,14 @@
}
],
"outputs": {
"output": "vectorStore"
"output": "retriever"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 37.23548045607484,
"y": -119.7364648743818
"x": 35.45798119088212,
"y": -132.1789597307308
},
"dragging": false
},
@ -403,79 +405,13 @@
"y": -127.15143353229783
}
},
{
"width": 300,
"height": 511,
"id": "queryEngineToolLlamaIndex_0",
"position": {
"x": 460.37559236135905,
"y": -565.6224030941121
},
"type": "customNode",
"data": {
"id": "queryEngineToolLlamaIndex_0",
"label": "QueryEngine Tool",
"version": 1,
"name": "queryEngineToolLlamaIndex",
"type": "QueryEngineTool",
"baseClasses": ["QueryEngineTool"],
"tags": ["LlamaIndex"],
"category": "Tools",
"description": "Execute actions using ChatGPT Plugin Url",
"inputParams": [
{
"label": "Tool Name",
"name": "toolName",
"type": "string",
"description": "Tool name must be small capital letter with underscore. Ex: my_tool",
"id": "queryEngineToolLlamaIndex_0-input-toolName-string"
},
{
"label": "Tool Description",
"name": "toolDesc",
"type": "string",
"rows": 4,
"id": "queryEngineToolLlamaIndex_0-input-toolDesc-string"
}
],
"inputAnchors": [
{
"label": "Vector Store Index",
"name": "vectorStoreIndex",
"type": "VectorStoreIndex",
"id": "queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex"
}
],
"inputs": {
"vectorStoreIndex": "{{pineconeLlamaIndex_1.data.instance}}",
"toolName": "apple_tool",
"toolDesc": "A SEC Form 10K filing describing the financials of Apple Inc (APPL) for the 2022 time period."
},
"outputAnchors": [
{
"id": "queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool",
"name": "queryEngineToolLlamaIndex",
"label": "QueryEngineTool",
"type": "QueryEngineTool"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 460.37559236135905,
"y": -565.6224030941121
},
"dragging": false
},
{
"width": 300,
"height": 611,
"id": "pineconeLlamaIndex_1",
"position": {
"x": 42.17855025460784,
"y": -839.8824444107056
"x": 43.95604951980056,
"y": -783.0024679245387
},
"type": "customNode",
"data": {
@ -584,162 +520,14 @@
}
],
"outputs": {
"output": "vectorStore"
"output": "retriever"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 42.17855025460784,
"y": -839.8824444107056
},
"dragging": false
},
{
"width": 300,
"height": 511,
"id": "queryEngineToolLlamaIndex_1",
"position": {
"x": 462.16721384216123,
"y": -17.750065363429798
},
"type": "customNode",
"data": {
"id": "queryEngineToolLlamaIndex_1",
"label": "QueryEngine Tool",
"version": 1,
"name": "queryEngineToolLlamaIndex",
"type": "QueryEngineTool",
"baseClasses": ["QueryEngineTool"],
"tags": ["LlamaIndex"],
"category": "Tools",
"description": "Execute actions using ChatGPT Plugin Url",
"inputParams": [
{
"label": "Tool Name",
"name": "toolName",
"type": "string",
"description": "Tool name must be small capital letter with underscore. Ex: my_tool",
"id": "queryEngineToolLlamaIndex_1-input-toolName-string"
},
{
"label": "Tool Description",
"name": "toolDesc",
"type": "string",
"rows": 4,
"id": "queryEngineToolLlamaIndex_1-input-toolDesc-string"
}
],
"inputAnchors": [
{
"label": "Vector Store Index",
"name": "vectorStoreIndex",
"type": "VectorStoreIndex",
"id": "queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex"
}
],
"inputs": {
"vectorStoreIndex": "{{pineconeLlamaIndex_0.data.instance}}",
"toolName": "tesla_tool",
"toolDesc": "A SEC Form 10K filing describing the financials of Tesla Inc (TSLA) for the 2022 time period."
},
"outputAnchors": [
{
"id": "queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool",
"name": "queryEngineToolLlamaIndex",
"label": "QueryEngineTool",
"type": "QueryEngineTool"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 462.16721384216123,
"y": -17.750065363429798
},
"dragging": false
},
{
"width": 300,
"height": 484,
"id": "subQuestionQueryEngine_0",
"position": {
"x": 982.7583030231563,
"y": 349.50858200305896
},
"type": "customNode",
"data": {
"id": "subQuestionQueryEngine_0",
"label": "Sub Question Query Engine",
"version": 1,
"name": "subQuestionQueryEngine",
"type": "SubQuestionQueryEngine",
"baseClasses": ["SubQuestionQueryEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Simple query engine built to answer question over your data, without memory",
"inputParams": [
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "subQuestionQueryEngine_0-input-returnSourceDocuments-boolean"
}
],
"inputAnchors": [
{
"label": "QueryEngine Tools",
"name": "queryEngineTools",
"type": "QueryEngineTool",
"list": true,
"id": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Embeddings",
"name": "embeddings",
"type": "BaseEmbedding_LlamaIndex",
"id": "subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex"
},
{
"label": "Response Synthesizer",
"name": "responseSynthesizer",
"type": "ResponseSynthesizer",
"description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target=\"_blank\" href=\"https://ts.llamaindex.ai/modules/low_level/response_synthesizer\">more</a>",
"optional": true,
"id": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer"
}
],
"inputs": {
"queryEngineTools": ["{{queryEngineToolLlamaIndex_1.data.instance}}", "{{queryEngineToolLlamaIndex_0.data.instance}}"],
"model": "{{chatOpenAI_LlamaIndex_1.data.instance}}",
"embeddings": "{{openAIEmbedding_LlamaIndex_1.data.instance}}",
"responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}",
"returnSourceDocuments": true
},
"outputAnchors": [
{
"id": "subQuestionQueryEngine_0-output-subQuestionQueryEngine-SubQuestionQueryEngine",
"name": "subQuestionQueryEngine",
"label": "SubQuestionQueryEngine",
"type": "SubQuestionQueryEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 982.7583030231563,
"y": 349.50858200305896
"x": 43.95604951980056,
"y": -783.0024679245387
},
"dragging": false
},
@ -748,8 +536,8 @@
"height": 529,
"id": "chatOpenAI_LlamaIndex_1",
"position": {
"x": -846.9087470244615,
"y": 23.446501495097493
"x": -446.80851289432655,
"y": 246.8790997755625
},
"type": "customNode",
"data": {
@ -882,8 +670,8 @@
},
"selected": false,
"positionAbsolute": {
"x": -846.9087470244615,
"y": 23.446501495097493
"x": -446.80851289432655,
"y": 246.8790997755625
},
"dragging": false
},
@ -892,8 +680,8 @@
"height": 334,
"id": "openAIEmbedding_LlamaIndex_1",
"position": {
"x": -437.3136244622061,
"y": 329.99986619821175
"x": -37.812177549447284,
"y": 577.9112529482311
},
"type": "customNode",
"data": {
@ -950,17 +738,370 @@
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": -437.3136244622061,
"y": 329.99986619821175
"x": -37.812177549447284,
"y": 577.9112529482311
}
},
{
"width": 300,
"height": 382,
"id": "queryEngine_0",
"position": {
"x": 416.2466817793368,
"y": -600.1335182096643
},
"type": "customNode",
"data": {
"id": "queryEngine_0",
"label": "Query Engine",
"version": 2,
"name": "queryEngine",
"type": "QueryEngine",
"baseClasses": ["QueryEngine", "BaseQueryEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Simple query engine built to answer question over your data, without memory",
"inputParams": [
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "queryEngine_0-input-returnSourceDocuments-boolean"
}
],
"inputAnchors": [
{
"label": "Vector Store Retriever",
"name": "vectorStoreRetriever",
"type": "VectorIndexRetriever",
"id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
},
{
"label": "Response Synthesizer",
"name": "responseSynthesizer",
"type": "ResponseSynthesizer",
"description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target=\"_blank\" href=\"https://ts.llamaindex.ai/modules/low_level/response_synthesizer\">more</a>",
"optional": true,
"id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer"
}
],
"inputs": {
"vectorStoreRetriever": "{{pineconeLlamaIndex_1.data.instance}}",
"responseSynthesizer": "",
"returnSourceDocuments": ""
},
"outputAnchors": [
{
"id": "queryEngine_0-output-queryEngine-QueryEngine|BaseQueryEngine",
"name": "queryEngine",
"label": "QueryEngine",
"description": "Simple query engine built to answer question over your data, without memory",
"type": "QueryEngine | BaseQueryEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 416.2466817793368,
"y": -600.1335182096643
},
"dragging": false
},
{
"width": 300,
"height": 511,
"id": "queryEngineToolLlamaIndex_2",
"position": {
"x": 766.9839000102993,
"y": -654.6926410455919
},
"type": "customNode",
"data": {
"id": "queryEngineToolLlamaIndex_2",
"label": "QueryEngine Tool",
"version": 2,
"name": "queryEngineToolLlamaIndex",
"type": "QueryEngineTool",
"baseClasses": ["QueryEngineTool"],
"tags": ["LlamaIndex"],
"category": "Tools",
"description": "Tool used to invoke query engine",
"inputParams": [
{
"label": "Tool Name",
"name": "toolName",
"type": "string",
"description": "Tool name must be small capital letter with underscore. Ex: my_tool",
"id": "queryEngineToolLlamaIndex_2-input-toolName-string"
},
{
"label": "Tool Description",
"name": "toolDesc",
"type": "string",
"rows": 4,
"id": "queryEngineToolLlamaIndex_2-input-toolDesc-string"
}
],
"inputAnchors": [
{
"label": "Base QueryEngine",
"name": "baseQueryEngine",
"type": "BaseQueryEngine",
"id": "queryEngineToolLlamaIndex_2-input-baseQueryEngine-BaseQueryEngine"
}
],
"inputs": {
"baseQueryEngine": "{{queryEngine_0.data.instance}}",
"toolName": "apple_tool",
"toolDesc": "A SEC Form 10K filing describing the financials of Apple Inc (APPL) for the 2022 time period."
},
"outputAnchors": [
{
"id": "queryEngineToolLlamaIndex_2-output-queryEngineToolLlamaIndex-QueryEngineTool",
"name": "queryEngineToolLlamaIndex",
"label": "QueryEngineTool",
"description": "Tool used to invoke query engine",
"type": "QueryEngineTool"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 766.9839000102993,
"y": -654.6926410455919
},
"dragging": false
},
{
"width": 300,
"height": 511,
"id": "queryEngineToolLlamaIndex_1",
"position": {
"x": 771.5434180813253,
"y": -109.03650423344013
},
"type": "customNode",
"data": {
"id": "queryEngineToolLlamaIndex_1",
"label": "QueryEngine Tool",
"version": 2,
"name": "queryEngineToolLlamaIndex",
"type": "QueryEngineTool",
"baseClasses": ["QueryEngineTool"],
"tags": ["LlamaIndex"],
"category": "Tools",
"description": "Tool used to invoke query engine",
"inputParams": [
{
"label": "Tool Name",
"name": "toolName",
"type": "string",
"description": "Tool name must be small capital letter with underscore. Ex: my_tool",
"id": "queryEngineToolLlamaIndex_1-input-toolName-string"
},
{
"label": "Tool Description",
"name": "toolDesc",
"type": "string",
"rows": 4,
"id": "queryEngineToolLlamaIndex_1-input-toolDesc-string"
}
],
"inputAnchors": [
{
"label": "Base QueryEngine",
"name": "baseQueryEngine",
"type": "BaseQueryEngine",
"id": "queryEngineToolLlamaIndex_1-input-baseQueryEngine-BaseQueryEngine"
}
],
"inputs": {
"baseQueryEngine": "{{queryEngine_1.data.instance}}",
"toolName": "tesla_tool",
"toolDesc": "A SEC Form 10K filing describing the financials of Tesla Inc (TSLA) for the 2022 time period."
},
"outputAnchors": [
{
"id": "queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool",
"name": "queryEngineToolLlamaIndex",
"label": "QueryEngineTool",
"description": "Tool used to invoke query engine",
"type": "QueryEngineTool"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 771.5434180813253,
"y": -109.03650423344013
},
"dragging": false
},
{
"width": 300,
"height": 382,
"id": "queryEngine_1",
"position": {
"x": 411.8632262885343,
"y": -68.91392354277994
},
"type": "customNode",
"data": {
"id": "queryEngine_1",
"label": "Query Engine",
"version": 2,
"name": "queryEngine",
"type": "QueryEngine",
"baseClasses": ["QueryEngine", "BaseQueryEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Simple query engine built to answer question over your data, without memory",
"inputParams": [
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "queryEngine_1-input-returnSourceDocuments-boolean"
}
],
"inputAnchors": [
{
"label": "Vector Store Retriever",
"name": "vectorStoreRetriever",
"type": "VectorIndexRetriever",
"id": "queryEngine_1-input-vectorStoreRetriever-VectorIndexRetriever"
},
{
"label": "Response Synthesizer",
"name": "responseSynthesizer",
"type": "ResponseSynthesizer",
"description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target=\"_blank\" href=\"https://ts.llamaindex.ai/modules/low_level/response_synthesizer\">more</a>",
"optional": true,
"id": "queryEngine_1-input-responseSynthesizer-ResponseSynthesizer"
}
],
"inputs": {
"vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}",
"responseSynthesizer": "",
"returnSourceDocuments": ""
},
"outputAnchors": [
{
"id": "queryEngine_1-output-queryEngine-QueryEngine|BaseQueryEngine",
"name": "queryEngine",
"label": "QueryEngine",
"description": "Simple query engine built to answer question over your data, without memory",
"type": "QueryEngine | BaseQueryEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 411.8632262885343,
"y": -68.91392354277994
},
"dragging": false
},
{
"width": 300,
"height": 484,
"id": "subQuestionQueryEngine_0",
"position": {
"x": 1204.489328490966,
"y": 347.2090726754211
},
"type": "customNode",
"data": {
"id": "subQuestionQueryEngine_0",
"label": "Sub Question Query Engine",
"version": 2,
"name": "subQuestionQueryEngine",
"type": "SubQuestionQueryEngine",
"baseClasses": ["SubQuestionQueryEngine", "BaseQueryEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response",
"inputParams": [
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "subQuestionQueryEngine_0-input-returnSourceDocuments-boolean"
}
],
"inputAnchors": [
{
"label": "QueryEngine Tools",
"name": "queryEngineTools",
"type": "QueryEngineTool",
"list": true,
"id": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Embeddings",
"name": "embeddings",
"type": "BaseEmbedding_LlamaIndex",
"id": "subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex"
},
{
"label": "Response Synthesizer",
"name": "responseSynthesizer",
"type": "ResponseSynthesizer",
"description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target=\"_blank\" href=\"https://ts.llamaindex.ai/modules/low_level/response_synthesizer\">more</a>",
"optional": true,
"id": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer"
}
],
"inputs": {
"queryEngineTools": ["{{queryEngineToolLlamaIndex_2.data.instance}}", "{{queryEngineToolLlamaIndex_1.data.instance}}"],
"model": "{{chatOpenAI_LlamaIndex_1.data.instance}}",
"embeddings": "{{openAIEmbedding_LlamaIndex_1.data.instance}}",
"responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}",
"returnSourceDocuments": true
},
"outputAnchors": [
{
"id": "subQuestionQueryEngine_0-output-subQuestionQueryEngine-SubQuestionQueryEngine|BaseQueryEngine",
"name": "subQuestionQueryEngine",
"label": "SubQuestionQueryEngine",
"description": "Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response",
"type": "SubQuestionQueryEngine | BaseQueryEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1204.489328490966,
"y": 347.2090726754211
},
"dragging": false
},
{
"width": 300,
"height": 82,
"id": "stickyNote_0",
"position": {
"x": 35.90892935132143,
"y": -936.1282632923861
"x": 1208.1786832265154,
"y": 238.26647262900994
},
"type": "stickyNote",
"data": {
@ -985,13 +1126,14 @@
],
"inputAnchors": [],
"inputs": {
"note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"apple\"}"
"note": "Break questions into subqueries, then retrieve corresponding context using queryengine tools"
},
"outputAnchors": [
{
"id": "stickyNote_0-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
@ -1000,8 +1142,8 @@
},
"selected": false,
"positionAbsolute": {
"x": 35.90892935132143,
"y": -936.1282632923861
"x": 1208.1786832265154,
"y": 238.26647262900994
},
"dragging": false
},
@ -1010,8 +1152,8 @@
"height": 82,
"id": "stickyNote_1",
"position": {
"x": 37.74909394815296,
"y": -215.17456133022054
"x": 416.8958270395809,
"y": -179.9680840754678
},
"type": "stickyNote",
"data": {
@ -1036,13 +1178,14 @@
],
"inputAnchors": [],
"inputs": {
"note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"tesla\"}"
"note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"<company>\"}"
},
"outputAnchors": [
{
"id": "stickyNote_1-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"description": "Add a sticky note",
"type": "StickyNote"
}
],
@ -1051,59 +1194,8 @@
},
"selected": false,
"positionAbsolute": {
"x": 37.74909394815296,
"y": -215.17456133022054
},
"dragging": false
},
{
"width": 300,
"height": 163,
"id": "stickyNote_2",
"position": {
"x": 984.9543031068163,
"y": 171.04264459503852
},
"type": "stickyNote",
"data": {
"id": "stickyNote_2",
"label": "Sticky Note",
"version": 1,
"name": "stickyNote",
"type": "StickyNote",
"baseClasses": ["StickyNote"],
"category": "Utilities",
"description": "Add a sticky note",
"inputParams": [
{
"label": "",
"name": "note",
"type": "string",
"rows": 1,
"placeholder": "Type something here",
"optional": true,
"id": "stickyNote_2-input-note-string"
}
],
"inputAnchors": [],
"inputs": {
"note": "Break questions into subqueries, then retrieve corresponding context using queryengine tool.\n\nThis implementation does not contains memory, we can use OpenAI Agent to function call this flow"
},
"outputAnchors": [
{
"id": "stickyNote_2-output-stickyNote-StickyNote",
"name": "stickyNote",
"label": "StickyNote",
"type": "StickyNote"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 984.9543031068163,
"y": 171.04264459503852
"x": 416.8958270395809,
"y": -179.9680840754678
},
"dragging": false
}
@ -1126,20 +1218,60 @@
"id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_1-pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex"
},
{
"source": "pineconeLlamaIndex_1",
"sourceHandle": "pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex",
"target": "queryEngineToolLlamaIndex_0",
"targetHandle": "queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex",
"source": "openAIEmbedding_LlamaIndex_0",
"sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"type": "buttonedge",
"id": "pineconeLlamaIndex_1-pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex-queryEngineToolLlamaIndex_0-queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex"
"id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
},
{
"source": "chatOpenAI_LlamaIndex_0",
"sourceHandle": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "chatOpenAI_LlamaIndex_0-chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
},
{
"source": "pineconeLlamaIndex_1",
"sourceHandle": "pineconeLlamaIndex_1-output-retriever-Pinecone|VectorIndexRetriever",
"target": "queryEngine_0",
"targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"type": "buttonedge",
"id": "pineconeLlamaIndex_1-pineconeLlamaIndex_1-output-retriever-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
},
{
"source": "queryEngine_0",
"sourceHandle": "queryEngine_0-output-queryEngine-QueryEngine|BaseQueryEngine",
"target": "queryEngineToolLlamaIndex_2",
"targetHandle": "queryEngineToolLlamaIndex_2-input-baseQueryEngine-BaseQueryEngine",
"type": "buttonedge",
"id": "queryEngine_0-queryEngine_0-output-queryEngine-QueryEngine|BaseQueryEngine-queryEngineToolLlamaIndex_2-queryEngineToolLlamaIndex_2-input-baseQueryEngine-BaseQueryEngine"
},
{
"source": "pineconeLlamaIndex_0",
"sourceHandle": "pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex",
"target": "queryEngineToolLlamaIndex_1",
"targetHandle": "queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex",
"sourceHandle": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever",
"target": "queryEngine_1",
"targetHandle": "queryEngine_1-input-vectorStoreRetriever-VectorIndexRetriever",
"type": "buttonedge",
"id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex-queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex"
"id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever-queryEngine_1-queryEngine_1-input-vectorStoreRetriever-VectorIndexRetriever"
},
{
"source": "queryEngine_1",
"sourceHandle": "queryEngine_1-output-queryEngine-QueryEngine|BaseQueryEngine",
"target": "queryEngineToolLlamaIndex_1",
"targetHandle": "queryEngineToolLlamaIndex_1-input-baseQueryEngine-BaseQueryEngine",
"type": "buttonedge",
"id": "queryEngine_1-queryEngine_1-output-queryEngine-QueryEngine|BaseQueryEngine-queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-input-baseQueryEngine-BaseQueryEngine"
},
{
"source": "queryEngineToolLlamaIndex_2",
"sourceHandle": "queryEngineToolLlamaIndex_2-output-queryEngineToolLlamaIndex-QueryEngineTool",
"target": "subQuestionQueryEngine_0",
"targetHandle": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool",
"type": "buttonedge",
"id": "queryEngineToolLlamaIndex_2-queryEngineToolLlamaIndex_2-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool"
},
{
"source": "queryEngineToolLlamaIndex_1",
@ -1149,14 +1281,6 @@
"type": "buttonedge",
"id": "queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool"
},
{
"source": "queryEngineToolLlamaIndex_0",
"sourceHandle": "queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool",
"target": "subQuestionQueryEngine_0",
"targetHandle": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool",
"type": "buttonedge",
"id": "queryEngineToolLlamaIndex_0-queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool"
},
{
"source": "chatOpenAI_LlamaIndex_1",
"sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM",
@ -1180,22 +1304,6 @@
"targetHandle": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
"type": "buttonedge",
"id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer"
},
{
"source": "openAIEmbedding_LlamaIndex_0",
"sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"type": "buttonedge",
"id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
},
{
"source": "chatOpenAI_LlamaIndex_0",
"sourceHandle": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "chatOpenAI_LlamaIndex_0-chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
}
]
}

View File

@ -1,5 +1,7 @@
{
"description": "Language translation using LLM Chain with a Chat Prompt Template and Chat Model",
"categories": "Chat Prompt Template,ChatOpenAI,LLM Chain,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -117,6 +119,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -137,6 +143,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,4 +1,7 @@
{
"description": "QA chain for Vectara",
"categories": "Vectara QA Chain,Vectara,Langchain",
"framework": "Langchain",
"nodes": [
{
"width": 300,

View File

@ -1,5 +1,7 @@
{
"description": "Conversational Agent with ability to visit a website and extract information",
"categories": "Buffer Memory,Web Browser,ChatOpenAI,Conversational Agent",
"framework": "Langchain",
"nodes": [
{
"width": 300,
@ -160,6 +162,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -180,6 +186,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
@ -458,6 +468,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -478,6 +492,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,7 @@
{
"description": "Scrape web pages for QnA with long term memory Motorhead and return source documents",
"categories": "HtmlToMarkdown,Cheerio Web Scraper,ChatOpenAI,Redis,Pinecone,Langchain",
"framework": "Langchain",
"badge": "POPULAR",
"nodes": [
{
@ -427,6 +429,10 @@
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-1106-vision-preview",
"name": "gpt-4-1106-vision-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
@ -447,6 +453,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0125",
"name": "gpt-3.5-turbo-0125"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"

View File

@ -1,5 +1,6 @@
{
"name": "add_contact_hubspot",
"framework": "Langchain",
"description": "Add new contact to Hubspot",
"color": "linear-gradient(rgb(85,198,123), rgb(0,230,99))",
"iconSrc": "https://cdn.worldvectorlogo.com/logos/hubspot-1.svg",

View File

@ -1,5 +1,6 @@
{
"name": "add_airtable",
"framework": "Langchain",
"description": "Add column1, column2 to Airtable",
"color": "linear-gradient(rgb(125,71,222), rgb(128,102,23))",
"iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/airtable.svg",

View File

@ -1,5 +1,6 @@
{
"name": "todays_date_time",
"framework": "Langchain",
"description": "Useful to get todays day, date and time.",
"color": "linear-gradient(rgb(117,118,129), rgb(230,10,250))",
"iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/javascript.svg",

View File

@ -1,5 +1,6 @@
{
"name": "get_stock_movers",
"framework": "Langchain",
"description": "Get the stocks that has biggest price/volume moves, e.g. actives, gainers, losers, etc.",
"iconSrc": "https://rapidapi.com/cdn/images?url=https://rapidapi-prod-apis.s3.amazonaws.com/9c/e743343bdd41edad39a3fdffd5b974/016c33699f51603ae6fe4420c439124b.png",
"color": "linear-gradient(rgb(191,202,167), rgb(143,202,246))",

View File

@ -1,5 +1,6 @@
{
"name": "make_webhook",
"framework": "Langchain",
"description": "Useful when you need to send message to Discord",
"color": "linear-gradient(rgb(19,94,2), rgb(19,124,59))",
"iconSrc": "https://github.com/FlowiseAI/Flowise/assets/26460777/517fdab2-8a6e-4781-b3c8-fb92cc78aa0b",

View File

@ -1,5 +1,6 @@
{
"name": "send_message_to_discord_channel",
"framework": "Langchain",
"description": "Send message to Discord channel",
"color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))",
"iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/discord-icon.svg",

View File

@ -1,5 +1,6 @@
{
"name": "send_message_to_slack_channel",
"framework": "Langchain",
"description": "Send message to Slack channel",
"color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))",
"iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/slack-icon.svg",

View File

@ -1,5 +1,6 @@
{
"name": "send_message_to_teams_channel",
"framework": "Langchain",
"description": "Send message to Teams channel",
"color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))",
"iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/microsoft-teams.svg",

View File

@ -1,5 +1,6 @@
{
"name": "sendgrid_email",
"framework": "Langchain",
"description": "Send email using SendGrid",
"color": "linear-gradient(rgb(230,108,70), rgb(222,4,98))",
"iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/sendgrid-icon.svg",

View File

@ -18,6 +18,7 @@ export default class Start extends Command {
static flags = {
FLOWISE_USERNAME: Flags.string(),
FLOWISE_PASSWORD: Flags.string(),
FLOWISE_FILE_SIZE_LIMIT: Flags.string(),
PORT: Flags.string(),
CORS_ORIGINS: Flags.string(),
IFRAME_ORIGINS: Flags.string(),
@ -91,6 +92,9 @@ export default class Start extends Command {
if (flags.FLOWISE_PASSWORD) process.env.FLOWISE_PASSWORD = flags.FLOWISE_PASSWORD
if (flags.APIKEY_PATH) process.env.APIKEY_PATH = flags.APIKEY_PATH
//API Configuration
if (flags.FLOWISE_FILE_SIZE_LIMIT) process.env.FLOWISE_FILE_SIZE_LIMIT = flags.FLOWISE_FILE_SIZE_LIMIT
// Credentials
if (flags.SECRETKEY_PATH) process.env.SECRETKEY_PATH = flags.SECRETKEY_PATH
if (flags.FLOWISE_SECRETKEY_OVERWRITE) process.env.FLOWISE_SECRETKEY_OVERWRITE = flags.FLOWISE_SECRETKEY_OVERWRITE

View File

@ -10,7 +10,7 @@ import logger from './utils/logger'
import { expressRequestLogger } from './utils/logger'
import { v4 as uuidv4 } from 'uuid'
import OpenAI from 'openai'
import { Between, IsNull, FindOptionsWhere } from 'typeorm'
import { FindOptionsWhere, MoreThanOrEqual, LessThanOrEqual } from 'typeorm'
import {
IChatFlow,
IncomingInput,
@ -120,8 +120,9 @@ export class App {
async config(socketIO?: Server) {
// Limit is needed to allow sending/receiving base64 encoded string
this.app.use(express.json({ limit: '50mb' }))
this.app.use(express.urlencoded({ limit: '50mb', extended: true }))
const flowise_file_size_limit = process.env.FLOWISE_FILE_SIZE_LIMIT ?? '50mb'
this.app.use(express.json({ limit: flowise_file_size_limit }))
this.app.use(express.urlencoded({ limit: flowise_file_size_limit, extended: true }))
if (process.env.NUMBER_OF_PROXIES && parseInt(process.env.NUMBER_OF_PROXIES) > 0)
this.app.set('trust proxy', parseInt(process.env.NUMBER_OF_PROXIES))
@ -184,7 +185,7 @@ export class App {
this.app.get('/api/v1/ip', (request, response) => {
response.send({
ip: request.ip,
msg: 'See the returned IP address in the response. If it matches your current IP address ( which you can get by going to http://ip.nfriedly.com/ or https://api.ipify.org/ ), then the number of proxies is correct and the rate limiter should now work correctly. If not, increase the number of proxies by 1 until the IP address matches your own. Visit https://docs.flowiseai.com/deployment#rate-limit-setup-guide for more information.'
msg: 'Check returned IP address in the response. If it matches your current IP address ( which you can get by going to http://ip.nfriedly.com/ or https://api.ipify.org/ ), then the number of proxies is correct and the rate limiter should now work correctly. If not, increase the number of proxies by 1 and restart Cloud-Hosted Flowise until the IP address matches your own. Visit https://docs.flowiseai.com/configuration/rate-limit#cloud-hosted-rate-limit-setup-guide for more information.'
})
})
@ -510,6 +511,7 @@ export class App {
const chatId = req.query?.chatId as string | undefined
const memoryType = req.query?.memoryType as string | undefined
const sessionId = req.query?.sessionId as string | undefined
const messageId = req.query?.messageId as string | undefined
const startDate = req.query?.startDate as string | undefined
const endDate = req.query?.endDate as string | undefined
let chatTypeFilter = req.query?.chatType as chatType | undefined
@ -537,7 +539,8 @@ export class App {
memoryType,
sessionId,
startDate,
endDate
endDate,
messageId
)
return res.json(chatmessages)
})
@ -1148,8 +1151,14 @@ export class App {
this.app.get('/api/v1/fetch-links', async (req: Request, res: Response) => {
const url = decodeURIComponent(req.query.url as string)
const relativeLinksMethod = req.query.relativeLinksMethod as string
if (!relativeLinksMethod) {
return res.status(500).send('Please choose a Relative Links Method in Additional Parameters.')
}
const limit = parseInt(req.query.limit as string)
if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`)
const links: string[] = relativeLinksMethod === 'webCrawl' ? await webCrawl(url, 0) : await xmlScrape(url, 0)
const links: string[] = relativeLinksMethod === 'webCrawl' ? await webCrawl(url, limit) : await xmlScrape(url, limit)
if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`)
res.json({ status: 'OK', links })
})
@ -1223,50 +1232,52 @@ export class App {
// Marketplaces
// ----------------------------------------
// Get all chatflows for marketplaces
this.app.get('/api/v1/marketplaces/chatflows', async (req: Request, res: Response) => {
const marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'chatflows')
const jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json')
const templates: any[] = []
// Get all templates for marketplaces
this.app.get('/api/v1/marketplaces/templates', async (req: Request, res: Response) => {
let marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'chatflows')
let jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json')
let templates: any[] = []
jsonsInDir.forEach((file, index) => {
const filePath = path.join(__dirname, '..', 'marketplaces', 'chatflows', file)
const fileData = fs.readFileSync(filePath)
const fileDataObj = JSON.parse(fileData.toString())
const template = {
id: index,
name: file.split('.json')[0],
templateName: file.split('.json')[0],
flowData: fileData.toString(),
badge: fileDataObj?.badge,
framework: fileDataObj?.framework,
categories: fileDataObj?.categories,
type: 'Chatflow',
description: fileDataObj?.description || ''
}
templates.push(template)
})
marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'tools')
jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json')
jsonsInDir.forEach((file, index) => {
const filePath = path.join(__dirname, '..', 'marketplaces', 'tools', file)
const fileData = fs.readFileSync(filePath)
const fileDataObj = JSON.parse(fileData.toString())
const template = {
...fileDataObj,
id: index,
type: 'Tool',
framework: fileDataObj?.framework,
badge: fileDataObj?.badge,
categories: '',
templateName: file.split('.json')[0]
}
templates.push(template)
})
const FlowiseDocsQnA = templates.find((tmp) => tmp.name === 'Flowise Docs QnA')
const FlowiseDocsQnAIndex = templates.findIndex((tmp) => tmp.name === 'Flowise Docs QnA')
if (FlowiseDocsQnA && FlowiseDocsQnAIndex > 0) {
templates.splice(FlowiseDocsQnAIndex, 1)
templates.unshift(FlowiseDocsQnA)
}
return res.json(templates)
})
// Get all tools for marketplaces
this.app.get('/api/v1/marketplaces/tools', async (req: Request, res: Response) => {
const marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'tools')
const jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json')
const templates: any[] = []
jsonsInDir.forEach((file, index) => {
const filePath = path.join(__dirname, '..', 'marketplaces', 'tools', file)
const fileData = fs.readFileSync(filePath)
const fileDataObj = JSON.parse(fileData.toString())
const template = {
...fileDataObj,
id: index,
templateName: file.split('.json')[0]
}
templates.push(template)
})
return res.json(templates)
return res.json(templates.sort((a, b) => a.templateName.localeCompare(b.templateName)))
})
// ----------------------------------------
@ -1431,22 +1442,34 @@ export class App {
memoryType?: string,
sessionId?: string,
startDate?: string,
endDate?: string
endDate?: string,
messageId?: string
): Promise<ChatMessage[]> {
const setDateToStartOrEndOfDay = (dateTimeStr: string, setHours: 'start' | 'end') => {
const date = new Date(dateTimeStr)
if (isNaN(date.getTime())) {
return undefined
}
setHours === 'start' ? date.setHours(0, 0, 0, 0) : date.setHours(23, 59, 59, 999)
return date
}
let fromDate
if (startDate) fromDate = new Date(startDate)
if (startDate) fromDate = setDateToStartOrEndOfDay(startDate, 'start')
let toDate
if (endDate) toDate = new Date(endDate)
if (endDate) toDate = setDateToStartOrEndOfDay(endDate, 'end')
return await this.AppDataSource.getRepository(ChatMessage).find({
where: {
chatflowid,
chatType,
chatId,
memoryType: memoryType ?? (chatId ? IsNull() : undefined),
memoryType: memoryType ?? undefined,
sessionId: sessionId ?? undefined,
createdDate: toDate && fromDate ? Between(fromDate, toDate) : undefined
...(fromDate && { createdDate: MoreThanOrEqual(fromDate) }),
...(toDate && { createdDate: LessThanOrEqual(toDate) }),
id: messageId ?? undefined
},
order: {
createdDate: sortOrder === 'DESC' ? 'DESC' : 'ASC'

View File

@ -10,7 +10,8 @@ module.exports = {
}
}
]
}
},
ignoreWarnings: [/Failed to parse source map/] // Ignore warnings about source maps
}
}
}

View File

@ -2,8 +2,10 @@ import client from './client'
const getAllChatflowsMarketplaces = () => client.get('/marketplaces/chatflows')
const getAllToolsMarketplaces = () => client.get('/marketplaces/tools')
const getAllTemplatesFromMarketplaces = () => client.get('/marketplaces/templates')
export default {
getAllChatflowsMarketplaces,
getAllToolsMarketplaces
getAllToolsMarketplaces,
getAllTemplatesFromMarketplaces
}

View File

@ -1,8 +1,8 @@
import client from './client'
const fetchAllLinks = (url, relativeLinksMethod) =>
client.get(`/fetch-links?url=${encodeURIComponent(url)}&relativeLinksMethod=${relativeLinksMethod}`)
const fetchLinks = (url, relativeLinksMethod, relativeLinksLimit) =>
client.get(`/fetch-links?url=${encodeURIComponent(url)}&relativeLinksMethod=${relativeLinksMethod}&limit=${relativeLinksLimit}`)
export default {
fetchAllLinks
fetchLinks
}

View File

@ -16,7 +16,7 @@ import {
Stack,
Typography
} from '@mui/material'
import { IconTrash } from '@tabler/icons'
import { IconTrash, IconX } from '@tabler/icons'
import PerfectScrollbar from 'react-perfect-scrollbar'
import { BackdropLoader } from 'ui-component/loading/BackdropLoader'
@ -24,12 +24,23 @@ import { StyledButton } from 'ui-component/button/StyledButton'
import scraperApi from 'api/scraper'
import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from 'store/actions'
import useNotifier from 'utils/useNotifier'
import {
HIDE_CANVAS_DIALOG,
SHOW_CANVAS_DIALOG,
enqueueSnackbar as enqueueSnackbarAction,
closeSnackbar as closeSnackbarAction
} from 'store/actions'
const ManageScrapedLinksDialog = ({ show, dialogProps, onCancel, onSave }) => {
const portalElement = document.getElementById('portal')
const dispatch = useDispatch()
useNotifier()
const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args))
const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args))
const [loading, setLoading] = useState(false)
const [selectedLinks, setSelectedLinks] = useState([])
const [url, setUrl] = useState('')
@ -53,9 +64,38 @@ const ManageScrapedLinksDialog = ({ show, dialogProps, onCancel, onSave }) => {
const handleFetchLinks = async () => {
setLoading(true)
const fetchLinksResp = await scraperApi.fetchAllLinks(url, 'webCrawl')
if (fetchLinksResp.data) {
setSelectedLinks(fetchLinksResp.data.links)
try {
const fetchLinksResp = await scraperApi.fetchLinks(url, dialogProps.relativeLinksMethod, dialogProps.limit)
if (fetchLinksResp.data) {
setSelectedLinks(fetchLinksResp.data.links)
enqueueSnackbar({
message: 'Successfully fetched links',
options: {
key: new Date().getTime() + Math.random(),
variant: 'success',
action: (key) => (
<Button style={{ color: 'white' }} onClick={() => closeSnackbar(key)}>
<IconX />
</Button>
)
}
})
}
} catch (error) {
const errorData = error.response.data || `${error.response.status}: ${error.response.statusText}`
enqueueSnackbar({
message: errorData,
options: {
key: new Date().getTime() + Math.random(),
variant: 'error',
persist: true,
action: (key) => (
<Button style={{ color: 'white' }} onClick={() => closeSnackbar(key)}>
<IconX />
</Button>
)
}
})
}
setLoading(false)
}

View File

@ -0,0 +1,146 @@
import PropTypes from 'prop-types'
import { styled } from '@mui/material/styles'
import Table from '@mui/material/Table'
import TableBody from '@mui/material/TableBody'
import TableCell, { tableCellClasses } from '@mui/material/TableCell'
import TableContainer from '@mui/material/TableContainer'
import TableHead from '@mui/material/TableHead'
import TableRow from '@mui/material/TableRow'
import Paper from '@mui/material/Paper'
import Chip from '@mui/material/Chip'
import { Button, Typography } from '@mui/material'
// Themed table cell: header cells render white-on-black, body cells use 14px text.
const StyledTableCell = styled(TableCell)(({ theme: muiTheme }) => {
    return {
        [`&.${tableCellClasses.body}`]: {
            fontSize: 14
        },
        [`&.${tableCellClasses.head}`]: {
            backgroundColor: muiTheme.palette.common.black,
            color: muiTheme.palette.common.white
        }
    }
})
// Zebra-striped table row: odd rows take the theme hover tint,
// and the final row drops its bottom border.
const StyledTableRow = styled(TableRow)(({ theme: muiTheme }) => {
    const stripe = {
        '&:nth-of-type(odd)': {
            backgroundColor: muiTheme.palette.action.hover
        }
    }
    const hideLastBorder = {
        '&:last-child td, &:last-child th': {
            border: 0
        }
    }
    return { ...stripe, ...hideLastBorder }
})
export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterByType, filterByFramework, goToCanvas, goToTool }) => {
const openTemplate = (selectedTemplate) => {
if (selectedTemplate.flowData) {
goToCanvas(selectedTemplate)
} else {
goToTool(selectedTemplate)
}
}
return (
<>
<TableContainer style={{ marginTop: '30', border: 1 }} component={Paper}>
<Table sx={{ minWidth: 650 }} size='small' aria-label='a dense table'>
<TableHead>
<TableRow sx={{ marginTop: '10', backgroundColor: 'primary' }}>
<StyledTableCell component='th' scope='row' style={{ width: '15%' }} key='0'>
Name
</StyledTableCell>
<StyledTableCell component='th' scope='row' style={{ width: '5%' }} key='1'>
Type
</StyledTableCell>
<StyledTableCell style={{ width: '35%' }} key='2'>
Description
</StyledTableCell>
<StyledTableCell style={{ width: '35%' }} key='3'>
Nodes
</StyledTableCell>
<StyledTableCell component='th' scope='row' style={{ width: '5%' }} key='4'>
&nbsp;
</StyledTableCell>
</TableRow>
</TableHead>
<TableBody>
{data
.filter(filterByBadge)
.filter(filterByType)
.filter(filterFunction)
.filter(filterByFramework)
.map((row, index) => (
<StyledTableRow key={index}>
<TableCell key='0'>
<Typography
sx={{ fontSize: '1.2rem', fontWeight: 500, overflowWrap: 'break-word', whiteSpace: 'pre-line' }}
>
<Button onClick={() => openTemplate(row)} sx={{ textAlign: 'left' }}>
{row.templateName || row.name}
</Button>
</Typography>
</TableCell>
<TableCell key='1'>
<Typography>{row.type}</Typography>
</TableCell>
<TableCell key='2'>
<Typography sx={{ overflowWrap: 'break-word', whiteSpace: 'pre-line' }}>
{row.description || ''}
</Typography>
</TableCell>
<TableCell key='3'>
<div
style={{
display: 'flex',
flexDirection: 'row',
flexWrap: 'wrap',
marginTop: 5
}}
>
{row.categories &&
row.categories
.split(',')
.map((tag, index) => (
<Chip
variant='outlined'
key={index}
size='small'
label={tag.toUpperCase()}
style={{ marginRight: 3, marginBottom: 3 }}
/>
))}
</div>
</TableCell>
<TableCell key='4'>
<Typography>
{row.badge &&
row.badge
.split(';')
.map((tag, index) => (
<Chip
color={tag === 'POPULAR' ? 'primary' : 'error'}
key={index}
size='small'
label={tag.toUpperCase()}
style={{ marginRight: 5, marginBottom: 5 }}
/>
))}
</Typography>
</TableCell>
</StyledTableRow>
))}
</TableBody>
</Table>
</TableContainer>
</>
)
}
// Runtime prop validation for MarketplaceTable; all props are optional
// (filters and navigation callbacks are supplied by the Marketplace view).
MarketplaceTable.propTypes = {
    data: PropTypes.array,
    filterFunction: PropTypes.func,
    filterByBadge: PropTypes.func,
    filterByType: PropTypes.func,
    filterByFramework: PropTypes.func,
    goToTool: PropTypes.func,
    goToCanvas: PropTypes.func
}

View File

@ -99,6 +99,7 @@ export const initNode = (nodeData, newNodeId) => {
id: `${newNodeId}-output-${nodeData.outputs[j].name}-${baseClasses}`,
name: nodeData.outputs[j].name,
label: nodeData.outputs[j].label,
description: nodeData.outputs[j].description ?? '',
type
}
options.push(newOutputOption)
@ -107,6 +108,7 @@ export const initNode = (nodeData, newNodeId) => {
name: 'output',
label: 'Output',
type: 'options',
description: nodeData.outputs[0].description ?? '',
options,
default: nodeData.outputs[0].name
}
@ -116,6 +118,7 @@ export const initNode = (nodeData, newNodeId) => {
id: `${newNodeId}-output-${nodeData.name}-${nodeData.baseClasses.join('|')}`,
name: nodeData.name,
label: nodeData.type,
description: nodeData.description ?? '',
type: nodeData.baseClasses.join(' | ')
}
outputAnchors.push(newOutput)

View File

@ -91,9 +91,11 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA
}
}
const onManageLinksDialogClicked = (url, selectedLinks) => {
const onManageLinksDialogClicked = (url, selectedLinks, relativeLinksMethod, limit) => {
const dialogProps = {
url,
relativeLinksMethod,
limit,
selectedLinks,
confirmButtonName: 'Save',
cancelButtonName: 'Cancel'
@ -475,7 +477,9 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA
onClick={() =>
onManageLinksDialogClicked(
data.inputs[inputParam.name] ?? inputParam.default ?? '',
data.inputs.selectedLinks
data.inputs.selectedLinks,
data.inputs['relativeLinksMethod'] ?? 'webCrawl',
parseInt(data.inputs['limit']) ?? 0
)
}
>

View File

@ -4,9 +4,26 @@ import { useSelector } from 'react-redux'
import PropTypes from 'prop-types'
// material-ui
import { Grid, Box, Stack, Tabs, Tab, Badge } from '@mui/material'
import {
Grid,
Box,
Stack,
Badge,
Toolbar,
TextField,
InputAdornment,
ButtonGroup,
ToggleButton,
InputLabel,
FormControl,
Select,
OutlinedInput,
Checkbox,
ListItemText,
Button
} from '@mui/material'
import { useTheme } from '@mui/material/styles'
import { IconHierarchy, IconTool } from '@tabler/icons'
import { IconChevronsDown, IconChevronsUp, IconLayoutGrid, IconList, IconSearch } from '@tabler/icons'
// project imports
import MainCard from 'ui-component/cards/MainCard'
@ -23,6 +40,10 @@ import useApi from 'hooks/useApi'
// const
import { baseURL } from 'store/constant'
import * as React from 'react'
import ToggleButtonGroup from '@mui/material/ToggleButtonGroup'
import { MarketplaceTable } from '../../ui-component/table/MarketplaceTable'
import MenuItem from '@mui/material/MenuItem'
function TabPanel(props) {
const { children, value, index, ...other } = props
@ -45,6 +66,19 @@ TabPanel.propTypes = {
value: PropTypes.number.isRequired
}
// Sizing constants for the filter dropdown menus.
const ITEM_HEIGHT = 48
const ITEM_PADDING_TOP = 8
// Option lists backing the tag / type / framework multi-select filters.
const badges = ['POPULAR', 'NEW']
const types = ['Chatflow', 'Tool']
const framework = ['Langchain', 'LlamaIndex']
// Shared MUI Select menu config: caps the popup at ~4.5 rows and fixes its width.
const MenuProps = {
    PaperProps: {
        style: {
            maxHeight: ITEM_HEIGHT * 4.5 + ITEM_PADDING_TOP,
            width: 250
        }
    }
}
// ==============================|| Marketplace ||============================== //
const Marketplace = () => {
@ -53,16 +87,77 @@ const Marketplace = () => {
const theme = useTheme()
const customization = useSelector((state) => state.customization)
const [isChatflowsLoading, setChatflowsLoading] = useState(true)
const [isToolsLoading, setToolsLoading] = useState(true)
const [isLoading, setLoading] = useState(true)
const [images, setImages] = useState({})
const tabItems = ['Chatflows', 'Tools']
const [value, setValue] = useState(0)
const [showToolDialog, setShowToolDialog] = useState(false)
const [toolDialogProps, setToolDialogProps] = useState({})
const getAllChatflowsMarketplacesApi = useApi(marketplacesApi.getAllChatflowsMarketplaces)
const getAllToolsMarketplacesApi = useApi(marketplacesApi.getAllToolsMarketplaces)
const getAllTemplatesMarketplacesApi = useApi(marketplacesApi.getAllTemplatesFromMarketplaces)
const [view, setView] = React.useState(localStorage.getItem('mpDisplayStyle') || 'card')
const [search, setSearch] = useState('')
const [badgeFilter, setBadgeFilter] = useState([])
const [typeFilter, setTypeFilter] = useState([])
const [frameworkFilter, setFrameworkFilter] = useState([])
const [open, setOpen] = useState(false)
// Update the tag (badge) filter from the multi-select's change event.
const handleBadgeFilterChange = (event) => {
    const raw = event.target.value
    // Browser autofill can deliver the selection as one comma-joined
    // string instead of an array; normalise before storing.
    setBadgeFilter(typeof raw === 'string' ? raw.split(',') : raw)
}
// Update the template-type filter from the multi-select's change event.
const handleTypeFilterChange = (event) => {
    const raw = event.target.value
    // Autofill may hand back a comma-joined string; coerce it to an array.
    setTypeFilter(typeof raw === 'string' ? raw.split(',') : raw)
}
// Update the framework filter from the multi-select's change event.
const handleFrameworkFilterChange = (event) => {
    const raw = event.target.value
    // Autofill may hand back a comma-joined string; coerce it to an array.
    setFrameworkFilter(typeof raw === 'string' ? raw.split(',') : raw)
}
// Persist the chosen marketplace layout ('card' | 'list') and apply it.
const handleViewChange = (event, nextView) => {
    // An exclusive MUI ToggleButtonGroup reports nextView === null when the
    // already-active button is clicked again. Storing that null would be
    // stringified by localStorage to "null" — a truthy value that defeats
    // the `getItem(...) || 'card'` fallback on reload and matches neither
    // the card nor the list render branch, blanking the page. Ignore it.
    if (nextView === null) return
    localStorage.setItem('mpDisplayStyle', nextView)
    setView(nextView)
}
// Mirror the search box text into component state.
const onSearchChange = (event) => {
    const { value } = event.target
    setSearch(value)
}
// Case-insensitive match of the search text against a template's
// categories, name, or description. Used as an Array#filter predicate.
function filterFlows(data) {
    const needle = search.toLowerCase()
    return (
        data.categories?.toLowerCase().includes(needle) ||
        data.templateName.toLowerCase().includes(needle) ||
        (data.description && data.description.toLowerCase().includes(needle))
    )
}
// Keep rows whose badge is among the selected tags; an empty selection
// means "show everything".
function filterByBadge(data) {
    if (badgeFilter.length === 0) return true
    return badgeFilter.includes(data.badge)
}
// Keep rows whose type is among the selected types; an empty selection
// means "show everything".
function filterByType(data) {
    if (typeFilter.length === 0) return true
    return typeFilter.includes(data.type)
}
// Keep rows whose framework is among the selected frameworks; an empty
// selection means "show everything".
function filterByFramework(data) {
    if (frameworkFilter.length === 0) return true
    return frameworkFilter.includes(data.framework)
}
const onUseTemplate = (selectedTool) => {
const dialogProp = {
@ -90,39 +185,33 @@ const Marketplace = () => {
navigate(`/marketplace/${selectedChatflow.id}`, { state: selectedChatflow })
}
const handleChange = (event, newValue) => {
setValue(newValue)
}
useEffect(() => {
getAllChatflowsMarketplacesApi.request()
getAllToolsMarketplacesApi.request()
getAllTemplatesMarketplacesApi.request()
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
useEffect(() => {
setChatflowsLoading(getAllChatflowsMarketplacesApi.loading)
}, [getAllChatflowsMarketplacesApi.loading])
setLoading(getAllTemplatesMarketplacesApi.loading)
}, [getAllTemplatesMarketplacesApi.loading])
useEffect(() => {
setToolsLoading(getAllToolsMarketplacesApi.loading)
}, [getAllToolsMarketplacesApi.loading])
useEffect(() => {
if (getAllChatflowsMarketplacesApi.data) {
if (getAllTemplatesMarketplacesApi.data) {
try {
const chatflows = getAllChatflowsMarketplacesApi.data
const flows = getAllTemplatesMarketplacesApi.data
const images = {}
for (let i = 0; i < chatflows.length; i += 1) {
const flowDataStr = chatflows[i].flowData
const flowData = JSON.parse(flowDataStr)
const nodes = flowData.nodes || []
images[chatflows[i].id] = []
for (let j = 0; j < nodes.length; j += 1) {
const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}`
if (!images[chatflows[i].id].includes(imageSrc)) {
images[chatflows[i].id].push(imageSrc)
for (let i = 0; i < flows.length; i += 1) {
if (flows[i].flowData) {
const flowDataStr = flows[i].flowData
const flowData = JSON.parse(flowDataStr)
const nodes = flowData.nodes || []
images[flows[i].id] = []
for (let j = 0; j < nodes.length; j += 1) {
const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}`
if (!images[flows[i].id].includes(imageSrc)) {
images[flows[i].id].push(imageSrc)
}
}
}
}
@ -131,80 +220,215 @@ const Marketplace = () => {
console.error(e)
}
}
}, [getAllChatflowsMarketplacesApi.data])
}, [getAllTemplatesMarketplacesApi.data])
return (
<>
<MainCard sx={{ background: customization.isDarkMode ? theme.palette.common.black : '' }}>
<Stack flexDirection='row'>
<h1>Marketplace</h1>
</Stack>
<Tabs sx={{ mb: 2 }} variant='fullWidth' value={value} onChange={handleChange} aria-label='tabs'>
{tabItems.map((item, index) => (
<Tab
key={index}
icon={index === 0 ? <IconHierarchy /> : <IconTool />}
iconPosition='start'
label={<span style={{ fontSize: '1.1rem' }}>{item}</span>}
<Box sx={{ flexGrow: 1 }}>
<Toolbar
disableGutters={true}
style={{
margin: 1,
padding: 1,
paddingBottom: 10,
display: 'flex',
justifyContent: 'space-between',
width: '100%'
}}
>
<h1>Marketplace</h1>
<TextField
size='small'
id='search-filter-textbox'
sx={{ display: { xs: 'none', sm: 'block' }, ml: 3 }}
variant='outlined'
fullWidth='true'
placeholder='Search name or description or node name'
onChange={onSearchChange}
InputProps={{
startAdornment: (
<InputAdornment position='start'>
<IconSearch />
</InputAdornment>
)
}}
/>
))}
</Tabs>
{tabItems.map((item, index) => (
<TabPanel key={index} value={value} index={index}>
{item === 'Chatflows' && (
<Grid container spacing={gridSpacing}>
{!isChatflowsLoading &&
getAllChatflowsMarketplacesApi.data &&
getAllChatflowsMarketplacesApi.data.map((data, index) => (
<Grid key={index} item lg={3} md={4} sm={6} xs={12}>
{data.badge && (
<Badge
sx={{
'& .MuiBadge-badge': {
right: 20
}
}}
badgeContent={data.badge}
color={data.badge === 'POPULAR' ? 'primary' : 'error'}
>
<Button
sx={{ width: '220px', ml: 3, mr: 5 }}
variant='outlined'
onClick={() => setOpen(!open)}
startIcon={open ? <IconChevronsUp /> : <IconChevronsDown />}
>
{open ? 'Hide Filters' : 'Show Filters'}
</Button>
<Box sx={{ flexGrow: 1 }} />
<ButtonGroup sx={{ maxHeight: 40 }} disableElevation variant='contained' aria-label='outlined primary button group'>
<ButtonGroup disableElevation variant='contained' aria-label='outlined primary button group'>
<ToggleButtonGroup
sx={{ maxHeight: 40 }}
value={view}
color='primary'
exclusive
onChange={handleViewChange}
>
<ToggleButton
sx={{ color: theme?.customization?.isDarkMode ? 'white' : 'inherit' }}
variant='contained'
value='card'
title='Card View'
>
<IconLayoutGrid />
</ToggleButton>
<ToggleButton
sx={{ color: theme?.customization?.isDarkMode ? 'white' : 'inherit' }}
variant='contained'
value='list'
title='List View'
>
<IconList />
</ToggleButton>
</ToggleButtonGroup>
</ButtonGroup>
</ButtonGroup>
</Toolbar>
</Box>
{open && (
<Box sx={{ flexGrow: 1, mb: 2 }}>
<Toolbar
disableGutters={true}
style={{
margin: 1,
padding: 1,
paddingBottom: 10,
display: 'flex',
justifyContent: 'flex-start',
width: '100%',
borderBottom: '1px solid'
}}
>
<FormControl sx={{ m: 1, width: 250 }}>
<InputLabel size='small' id='filter-badge-label'>
Tag
</InputLabel>
<Select
labelId='filter-badge-label'
id='filter-badge-checkbox'
size='small'
multiple
value={badgeFilter}
onChange={handleBadgeFilterChange}
input={<OutlinedInput label='Badge' />}
renderValue={(selected) => selected.join(', ')}
MenuProps={MenuProps}
>
{badges.map((name) => (
<MenuItem key={name} value={name}>
<Checkbox checked={badgeFilter.indexOf(name) > -1} />
<ListItemText primary={name} />
</MenuItem>
))}
</Select>
</FormControl>
<FormControl sx={{ m: 1, width: 250 }}>
<InputLabel size='small' id='type-badge-label'>
Type
</InputLabel>
<Select
size='small'
labelId='type-badge-label'
id='type-badge-checkbox'
multiple
value={typeFilter}
onChange={handleTypeFilterChange}
input={<OutlinedInput label='Badge' />}
renderValue={(selected) => selected.join(', ')}
MenuProps={MenuProps}
>
{types.map((name) => (
<MenuItem key={name} value={name}>
<Checkbox checked={typeFilter.indexOf(name) > -1} />
<ListItemText primary={name} />
</MenuItem>
))}
</Select>
</FormControl>
<FormControl sx={{ m: 1, width: 250 }}>
<InputLabel size='small' id='type-fw-label'>
Framework
</InputLabel>
<Select
size='small'
labelId='type-fw-label'
id='type-fw-checkbox'
multiple
value={frameworkFilter}
onChange={handleFrameworkFilterChange}
input={<OutlinedInput label='Badge' />}
renderValue={(selected) => selected.join(', ')}
MenuProps={MenuProps}
>
{framework.map((name) => (
<MenuItem key={name} value={name}>
<Checkbox checked={frameworkFilter.indexOf(name) > -1} />
<ListItemText primary={name} />
</MenuItem>
))}
</Select>
</FormControl>
</Toolbar>
</Box>
)}
{!isLoading && (!view || view === 'card') && getAllTemplatesMarketplacesApi.data && (
<>
<Grid container spacing={gridSpacing}>
{getAllTemplatesMarketplacesApi.data
.filter(filterByBadge)
.filter(filterByType)
.filter(filterFlows)
.filter(filterByFramework)
.map((data, index) => (
<Grid key={index} item lg={3} md={4} sm={6} xs={12}>
{data.badge && (
<Badge
sx={{
'& .MuiBadge-badge': {
right: 20
}
}}
badgeContent={data.badge}
color={data.badge === 'POPULAR' ? 'primary' : 'error'}
>
{data.type === 'Chatflow' && (
<ItemCard onClick={() => goToCanvas(data)} data={data} images={images[data.id]} />
</Badge>
)}
{!data.badge && (
<ItemCard onClick={() => goToCanvas(data)} data={data} images={images[data.id]} />
)}
</Grid>
))}
</Grid>
)}
{item === 'Tools' && (
<Grid container spacing={gridSpacing}>
{!isToolsLoading &&
getAllToolsMarketplacesApi.data &&
getAllToolsMarketplacesApi.data.map((data, index) => (
<Grid key={index} item lg={3} md={4} sm={6} xs={12}>
{data.badge && (
<Badge
sx={{
'& .MuiBadge-badge': {
right: 20
}
}}
badgeContent={data.badge}
color={data.badge === 'POPULAR' ? 'primary' : 'error'}
>
<ItemCard data={data} onClick={() => goToTool(data)} />
</Badge>
)}
{!data.badge && <ItemCard data={data} onClick={() => goToTool(data)} />}
</Grid>
))}
</Grid>
)}
</TabPanel>
))}
{((!isChatflowsLoading && (!getAllChatflowsMarketplacesApi.data || getAllChatflowsMarketplacesApi.data.length === 0)) ||
(!isToolsLoading && (!getAllToolsMarketplacesApi.data || getAllToolsMarketplacesApi.data.length === 0))) && (
)}
{data.type === 'Tool' && <ItemCard data={data} onClick={() => goToTool(data)} />}
</Badge>
)}
{!data.badge && data.type === 'Chatflow' && (
<ItemCard onClick={() => goToCanvas(data)} data={data} images={images[data.id]} />
)}
{!data.badge && data.type === 'Tool' && <ItemCard data={data} onClick={() => goToTool(data)} />}
</Grid>
))}
</Grid>
</>
)}
{!isLoading && view === 'list' && getAllTemplatesMarketplacesApi.data && (
<MarketplaceTable
sx={{ mt: 20 }}
data={getAllTemplatesMarketplacesApi.data}
filterFunction={filterFlows}
filterByType={filterByType}
filterByBadge={filterByBadge}
filterByFramework={filterByFramework}
goToTool={goToTool}
goToCanvas={goToCanvas}
/>
)}
{!isLoading && (!getAllTemplatesMarketplacesApi.data || getAllTemplatesMarketplacesApi.data.length === 0) && (
<Stack sx={{ alignItems: 'center', justifyContent: 'center' }} flexDirection='column'>
<Box sx={{ p: 2, height: 'auto' }}>
<img