From c96572e10ff8fa954d973f10748c603a00d49ef9 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Sat, 25 Nov 2023 16:39:02 +0530 Subject: [PATCH 01/62] GPT Vision - OpenAIVisionChain --- .../chains/VisionChain/OpenAIVisionChain.ts | 216 ++++++++++++++++++ .../nodes/chains/VisionChain/VLLMChain.ts | 146 ++++++++++++ packages/server/src/index.ts | 26 +++ 3 files changed, 388 insertions(+) create mode 100644 packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts create mode 100644 packages/components/nodes/chains/VisionChain/VLLMChain.ts diff --git a/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts b/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts new file mode 100644 index 00000000..f2260a76 --- /dev/null +++ b/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts @@ -0,0 +1,216 @@ +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' +import { VLLMChain } from './VLLMChain' +import { BaseLanguageModel } from 'langchain/base_language' +import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' +import { ChatOpenAI } from 'langchain/chat_models/openai' + +class OpenAIVisionChain_Chains implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + baseClasses: string[] + description: string + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Open AI Vision Chain' + this.name = 'openAIVisionChain' + this.version = 3.0 + this.type = 'OpenAIVisionChain' + this.icon = 'chain.svg' + this.category = 'Chains' + this.description = 'Chain to run queries against OpenAI (GPT-4) Vision .' 
+ this.baseClasses = [this.type, ...getBaseClasses(VLLMChain)] + this.inputs = [ + { + label: 'Language Model (Works only with Open AI [gpt-4-vision-preview])', + name: 'model', + type: 'BaseLanguageModel' + }, + { + label: 'Prompt', + name: 'prompt', + type: 'BasePromptTemplate', + optional: true + }, + { + label: 'Image Resolution', + description: 'This parameter controls the resolution in which the model views the image.', + name: 'imageResolution', + type: 'options', + options: [ + { + label: 'Low', + name: 'low' + }, + { + label: 'High', + name: 'high' + } + ], + default: 'low', + optional: false + }, + { + label: 'Chain Name', + name: 'chainName', + type: 'string', + placeholder: 'Name Your Chain', + optional: true + } + ] + this.outputs = [ + { + label: 'Open AI Vision Chain', + name: 'openAIVisionChain', + baseClasses: [this.type, ...getBaseClasses(VLLMChain)] + }, + { + label: 'Output Prediction', + name: 'outputPrediction', + baseClasses: ['string', 'json'] + } + ] + } + + async init(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const model = nodeData.inputs?.model as BaseLanguageModel + const prompt = nodeData.inputs?.prompt + const output = nodeData.outputs?.output as string + const imageResolution = nodeData.inputs?.imageResolution + const promptValues = prompt.promptValues as ICommonObject + if (!(model as any).openAIApiKey || (model as any).modelName !== 'gpt-4-vision-preview') { + throw new Error('Chain works with OpenAI Vision model only') + } + const openAIModel = model as ChatOpenAI + const fields = { + openAIApiKey: openAIModel.openAIApiKey, + imageResolution: imageResolution, + verbose: process.env.DEBUG === 'true', + imageUrls: options.url, + openAIModel: openAIModel + } + if (output === this.name) { + const chain = new VLLMChain({ + ...fields, + prompt: prompt + }) + return chain + } else if (output === 'outputPrediction') { + const chain = new VLLMChain({ + ...fields + }) + const inputVariables: string[] = prompt.inputVariables as string[] // ["product"] + const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData) + // eslint-disable-next-line no-console + console.log('\x1b[92m\x1b[1m\n*****OUTPUT PREDICTION*****\n\x1b[0m\x1b[0m') + // eslint-disable-next-line no-console + console.log(res) + /** + * Apply string transformation to convert special chars: + * FROM: hello i am ben\n\n\thow are you? + * TO: hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you? 
+ */ + return handleEscapeCharacters(res, false) + } + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const prompt = nodeData.inputs?.prompt + const inputVariables: string[] = prompt.inputVariables as string[] // ["product"] + const chain = nodeData.instance as VLLMChain + let promptValues: ICommonObject | undefined = nodeData.inputs?.prompt.promptValues as ICommonObject + const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData) + // eslint-disable-next-line no-console + console.log('\x1b[93m\x1b[1m\n*****FINAL RESULT*****\n\x1b[0m\x1b[0m') + // eslint-disable-next-line no-console + console.log(res) + return res + } +} + +const runPrediction = async ( + inputVariables: string[], + chain: VLLMChain, + input: string, + promptValuesRaw: ICommonObject | undefined, + options: ICommonObject, + nodeData: INodeData +) => { + const loggerHandler = new ConsoleCallbackHandler(options.logger) + const callbacks = await additionalCallbacks(nodeData, options) + + const isStreaming = options.socketIO && options.socketIOClientId + const socketIO = isStreaming ? options.socketIO : undefined + const socketIOClientId = isStreaming ? options.socketIOClientId : '' + + /** + * Apply string transformation to reverse converted special chars: + * FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" } + * TO: { "value": "hello i am ben\n\n\thow are you?" } + */ + const promptValues = handleEscapeCharacters(promptValuesRaw, true) + if (options?.url) { + chain.imageUrls = options.url + } + if (promptValues && inputVariables.length > 0) { + let seen: string[] = [] + + for (const variable of inputVariables) { + seen.push(variable) + if (promptValues[variable]) { + chain.inputKey = variable + seen.pop() + } + } + + if (seen.length === 0) { + // All inputVariables have fixed values specified + const options = { ...promptValues } + if (isStreaming) { + const handler = new CustomChainHandler(socketIO, socketIOClientId) + const res = await chain.call(options, [loggerHandler, handler, ...callbacks]) + return formatResponse(res?.text) + } else { + const res = await chain.call(options, [loggerHandler, ...callbacks]) + return formatResponse(res?.text) + } + } else if (seen.length === 1) { + // If one inputVariable is not specify, use input (user's question) as value + const lastValue = seen.pop() + if (!lastValue) throw new Error('Please provide Prompt Values') + chain.inputKey = lastValue as string + const options = { + ...promptValues, + [lastValue]: input + } + if (isStreaming) { + const handler = new CustomChainHandler(socketIO, socketIOClientId) + const res = await chain.call(options, [loggerHandler, handler, ...callbacks]) + return formatResponse(res?.text) + } else { + const res = await chain.call(options, [loggerHandler, ...callbacks]) + return formatResponse(res?.text) + } + } else { + throw new Error(`Please provide Prompt Values for: ${seen.join(', ')}`) + } + } else { + if (isStreaming) { + const handler = new CustomChainHandler(socketIO, socketIOClientId) + const res = await chain.run(input, [loggerHandler, handler, ...callbacks]) + return formatResponse(res) + } else { + const res = await chain.run(input, [loggerHandler, ...callbacks]) + return formatResponse(res) + } + } +} + +module.exports = { nodeClass: OpenAIVisionChain_Chains } diff --git a/packages/components/nodes/chains/VisionChain/VLLMChain.ts b/packages/components/nodes/chains/VisionChain/VLLMChain.ts new file mode 100644 index 
00000000..17260be2 --- /dev/null +++ b/packages/components/nodes/chains/VisionChain/VLLMChain.ts @@ -0,0 +1,146 @@ +import { OpenAI as OpenAIClient, ClientOptions } from 'openai' +import { BaseChain, ChainInputs } from 'langchain/chains' +import { ChainValues } from 'langchain/schema' +import { BasePromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts' +import { ChatOpenAI } from 'langchain/chat_models/openai' + +/** + * Interface for the input parameters of the OpenAIVisionChain class. + */ +export interface OpenAIVisionChainInput extends ChainInputs { + openAIApiKey?: string + openAIOrganization?: string + throwError?: boolean + prompt?: BasePromptTemplate + configuration?: ClientOptions + imageUrls?: [] + imageResolution?: string + openAIModel: ChatOpenAI +} + +/** + * Class representing a chain for generating text from an image using the OpenAI + * Vision API. It extends the BaseChain class and implements the + * OpenAIVisionChainInput interface. + */ +export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { + static lc_name() { + return 'VLLMChain' + } + + get lc_secrets(): { [key: string]: string } | undefined { + return { + openAIApiKey: 'OPENAI_API_KEY' + } + } + prompt: BasePromptTemplate | undefined + + inputKey = 'input' + outputKey = 'text' + imageUrls?: [] + imageResolution: string = 'low' + openAIApiKey?: string + openAIOrganization?: string + openAIModel: ChatOpenAI + clientConfig: ClientOptions + client: OpenAIClient + throwError: boolean + + constructor(fields: OpenAIVisionChainInput) { + super(fields) + this.throwError = fields?.throwError ?? false + this.imageResolution = fields?.imageResolution ?? 'low' + this.openAIApiKey = fields?.openAIApiKey + this.prompt = fields?.prompt + this.imageUrls = fields?.imageUrls ?? 
[] + if (!this.openAIApiKey) { + throw new Error('OpenAI API key not found') + } + + this.openAIOrganization = fields?.openAIOrganization + this.openAIModel = fields.openAIModel + + this.clientConfig = { + ...fields?.configuration, + apiKey: this.openAIApiKey, + organization: this.openAIOrganization + } + + this.client = new OpenAIClient(this.clientConfig) + } + + async _call(values: ChainValues): Promise { + const userInput = values[this.inputKey] + + const vRequest: any = { + model: 'gpt-4-vision-preview', + temperature: this.openAIModel.temperature, + top_p: this.openAIModel.topP, + messages: [] + } + if (this.openAIModel.maxTokens) vRequest.max_tokens = this.openAIModel.maxTokens + + const userRole: any = { role: 'user' } + userRole.content = [] + userRole.content.push({ + type: 'text', + text: userInput + }) + if (this.imageUrls && this.imageUrls.length > 0) { + this.imageUrls.forEach((imageUrl: any) => { + userRole.content.push({ + type: 'image_url', + image_url: { + url: imageUrl?.data, + detail: this.imageResolution + } + }) + }) + } + vRequest.messages.push(userRole) + if (this.prompt && this.prompt instanceof ChatPromptTemplate) { + let chatPrompt = this.prompt as ChatPromptTemplate + chatPrompt.promptMessages.forEach((message: any) => { + if (message instanceof SystemMessagePromptTemplate) { + vRequest.messages.push({ + role: 'system', + content: [ + { + type: 'text', + text: (message.prompt as any).template + } + ] + }) + } + }) + } + + let response + try { + // @ts-ignore + response = await this.client.chat.completions.create(vRequest) + } catch (error) { + if (error instanceof Error) { + throw error + } else { + throw new Error(error as string) + } + } + const output = response.choices[0] + return { + [this.outputKey]: output.message.content + } + } + + _chainType() { + return 'vision_chain' + } + + get inputKeys() { + return this.prompt?.inputVariables ?? 
[this.inputKey] + } + + get outputKeys(): string[] { + return [this.outputKey] + } +} diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 91de4f4c..9bc3eb3a 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -403,6 +403,19 @@ export class App { return res.json(obj) }) + // Check if chatflow valid for uploads + this.app.get('/api/v1/chatflows-uploads/:id', async (req: Request, res: Response) => { + const chatflow = await this.AppDataSource.getRepository(ChatFlow).findOneBy({ + id: req.params.id + }) + if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) + + const obj = { + allowUploads: this.shouldAllowUploads(chatflow) + } + return res.json(obj) + }) + // ---------------------------------------- // ChatMessage // ---------------------------------------- @@ -1241,6 +1254,19 @@ export class App { }) } + private uploadAllowedNodes = ['OpenAIVisionChain'] + private shouldAllowUploads(result: ChatFlow): boolean { + const flowObj = JSON.parse(result.flowData) + let allowUploads = false + flowObj.nodes.forEach((node: IReactFlowNode) => { + if (this.uploadAllowedNodes.indexOf(node.data.type) > -1) { + logger.debug(`[server]: Found Eligible Node ${node.data.type}, Allowing Uploads.`) + allowUploads = true + } + }) + return allowUploads + } + /** * Validate API Key * @param {Request} req From 73f7046316ac8cf6645515332b0c3f3b8aed7c95 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 6 Dec 2023 12:31:33 +0530 Subject: [PATCH 02/62] GPT Vision: Initial implementation of the OpenAI Vision API --- .../chains/VisionChain/OpenAIVisionChain.ts | 24 +- .../nodes/chains/VisionChain/VLLMChain.ts | 1 + .../nodes/chains/VisionChain/chain.svg | 6 + packages/server/src/Interface.ts | 2 + .../src/database/entities/ChatMessage.ts | 3 + ...01788586491-AddFileUploadsToChatMessage.ts | 12 + .../src/database/migrations/mysql/index.ts | 4 +- ...01788586491-AddFileUploadsToChatMessage.ts | 11 + .../src/database/migrations/postgres/index.ts | 4 +- ...01788586491-AddFileUploadsToChatMessage.ts | 20 ++ .../src/database/migrations/sqlite/index.ts | 4 +- packages/server/src/index.ts | 44 ++- packages/ui/src/api/chatflows.js | 4 +- .../ui/src/views/chatmessage/ChatMessage.css | 29 ++ .../ui/src/views/chatmessage/ChatMessage.js | 301 +++++++++++++++++- 15 files changed, 447 insertions(+), 22 deletions(-) create mode 100644 packages/components/nodes/chains/VisionChain/chain.svg create mode 100644 packages/server/src/database/migrations/mysql/1701788586491-AddFileUploadsToChatMessage.ts create mode 100644 packages/server/src/database/migrations/postgres/1701788586491-AddFileUploadsToChatMessage.ts create mode 100644 packages/server/src/database/migrations/sqlite/1701788586491-AddFileUploadsToChatMessage.ts diff --git a/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts b/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts index f2260a76..7745f05d 100644 --- a/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts +++ b/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts @@ -12,6 +12,7 @@ class OpenAIVisionChain_Chains implements INode { version: number type: string icon: string + badge: string category: string baseClasses: string[] description: string @@ -21,10 +22,11 @@ class OpenAIVisionChain_Chains implements INode { constructor() { this.label = 'Open AI Vision Chain' this.name = 'openAIVisionChain' - this.version = 3.0 + this.version = 1.0 this.type = 'OpenAIVisionChain' this.icon = 
'chain.svg' this.category = 'Chains' + this.badge = 'EXPERIMENTAL' this.description = 'Chain to run queries against OpenAI (GPT-4) Vision .' this.baseClasses = [this.type, ...getBaseClasses(VLLMChain)] this.inputs = [ @@ -63,6 +65,20 @@ class OpenAIVisionChain_Chains implements INode { type: 'string', placeholder: 'Name Your Chain', optional: true + }, + { + label: 'Accepted Upload Types', + name: 'allowedUploadTypes', + type: 'string', + default: 'image/gif;image/jpeg;image/png;image/webp', + hidden: true + }, + { + label: 'Maximum Upload Size (MB)', + name: 'maxUploadSize', + type: 'number', + default: '5', + hidden: true } ] this.outputs = [ @@ -93,7 +109,7 @@ class OpenAIVisionChain_Chains implements INode { openAIApiKey: openAIModel.openAIApiKey, imageResolution: imageResolution, verbose: process.env.DEBUG === 'true', - imageUrls: options.url, + imageUrls: options.uploads, openAIModel: openAIModel } if (output === this.name) { @@ -156,8 +172,8 @@ const runPrediction = async ( * TO: { "value": "hello i am ben\n\n\thow are you?" } */ const promptValues = handleEscapeCharacters(promptValuesRaw, true) - if (options?.url) { - chain.imageUrls = options.url + if (options?.uploads) { + chain.imageUrls = options.uploads } if (promptValues && inputVariables.length > 0) { let seen: string[] = [] diff --git a/packages/components/nodes/chains/VisionChain/VLLMChain.ts b/packages/components/nodes/chains/VisionChain/VLLMChain.ts index 17260be2..59a2483a 100644 --- a/packages/components/nodes/chains/VisionChain/VLLMChain.ts +++ b/packages/components/nodes/chains/VisionChain/VLLMChain.ts @@ -79,6 +79,7 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { messages: [] } if (this.openAIModel.maxTokens) vRequest.max_tokens = this.openAIModel.maxTokens + else vRequest.max_tokens = 1024 const userRole: any = { role: 'user' } userRole.content = [] diff --git a/packages/components/nodes/chains/VisionChain/chain.svg b/packages/components/nodes/chains/VisionChain/chain.svg new file mode 100644 index 00000000..a5b32f90 --- /dev/null +++ b/packages/components/nodes/chains/VisionChain/chain.svg @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/packages/server/src/Interface.ts b/packages/server/src/Interface.ts index c562b4ee..30b4bd35 100644 --- a/packages/server/src/Interface.ts +++ b/packages/server/src/Interface.ts @@ -31,6 +31,7 @@ export interface IChatMessage { sourceDocuments?: string usedTools?: string fileAnnotations?: string + fileUploads?: string chatType: string chatId: string memoryType?: string @@ -167,6 +168,7 @@ export interface IncomingInput { socketIOClientId?: string chatId?: string stopNodeId?: string + uploads?: string } export interface IActiveChatflows { diff --git a/packages/server/src/database/entities/ChatMessage.ts b/packages/server/src/database/entities/ChatMessage.ts index 4054a26d..c803ce50 100644 --- a/packages/server/src/database/entities/ChatMessage.ts +++ b/packages/server/src/database/entities/ChatMessage.ts @@ -26,6 +26,9 @@ export class ChatMessage implements IChatMessage { @Column({ nullable: true, type: 'text' }) fileAnnotations?: string + @Column({ nullable: true, type: 'text' }) + fileUploads?: string + @Column() chatType: string diff --git a/packages/server/src/database/migrations/mysql/1701788586491-AddFileUploadsToChatMessage.ts b/packages/server/src/database/migrations/mysql/1701788586491-AddFileUploadsToChatMessage.ts new file mode 100644 index 00000000..d896066b --- /dev/null +++ 
b/packages/server/src/database/migrations/mysql/1701788586491-AddFileUploadsToChatMessage.ts @@ -0,0 +1,12 @@ +import { MigrationInterface, QueryRunner } from 'typeorm' + +export class AddFileUploadsToChatMessage1701788586491 implements MigrationInterface { + public async up(queryRunner: QueryRunner): Promise { + const columnExists = await queryRunner.hasColumn('chat_message', 'fileUploads') + if (!columnExists) queryRunner.query(`ALTER TABLE \`chat_message\` ADD COLUMN \`fileUploads\` TEXT;`) + } + + public async down(queryRunner: QueryRunner): Promise { + await queryRunner.query(`ALTER TABLE \`chat_message\` DROP COLUMN \`fileUploads\`;`) + } +} diff --git a/packages/server/src/database/migrations/mysql/index.ts b/packages/server/src/database/migrations/mysql/index.ts index 8f9824a8..f5adff64 100644 --- a/packages/server/src/database/migrations/mysql/index.ts +++ b/packages/server/src/database/migrations/mysql/index.ts @@ -10,6 +10,7 @@ import { AddAssistantEntity1699325775451 } from './1699325775451-AddAssistantEnt import { AddUsedToolsToChatMessage1699481607341 } from './1699481607341-AddUsedToolsToChatMessage' import { AddCategoryToChatFlow1699900910291 } from './1699900910291-AddCategoryToChatFlow' import { AddFileAnnotationsToChatMessage1700271021237 } from './1700271021237-AddFileAnnotationsToChatMessage' +import { AddFileUploadsToChatMessage1701788586491 } from './1701788586491-AddFileUploadsToChatMessage' export const mysqlMigrations = [ Init1693840429259, @@ -23,5 +24,6 @@ export const mysqlMigrations = [ AddAssistantEntity1699325775451, AddUsedToolsToChatMessage1699481607341, AddCategoryToChatFlow1699900910291, - AddFileAnnotationsToChatMessage1700271021237 + AddFileAnnotationsToChatMessage1700271021237, + AddFileUploadsToChatMessage1701788586491 ] diff --git a/packages/server/src/database/migrations/postgres/1701788586491-AddFileUploadsToChatMessage.ts b/packages/server/src/database/migrations/postgres/1701788586491-AddFileUploadsToChatMessage.ts new file mode 100644 index 00000000..6574ac81 --- /dev/null +++ b/packages/server/src/database/migrations/postgres/1701788586491-AddFileUploadsToChatMessage.ts @@ -0,0 +1,11 @@ +import { MigrationInterface, QueryRunner } from 'typeorm' + +export class AddFileUploadsToChatMessage1701788586491 implements MigrationInterface { + public async up(queryRunner: QueryRunner): Promise { + await queryRunner.query(`ALTER TABLE "chat_message" ADD COLUMN IF NOT EXISTS "fileUploads" TEXT;`) + } + + public async down(queryRunner: QueryRunner): Promise { + await queryRunner.query(`ALTER TABLE "chat_message" DROP COLUMN "fileUploads";`) + } +} diff --git a/packages/server/src/database/migrations/postgres/index.ts b/packages/server/src/database/migrations/postgres/index.ts index d196fbc1..f80335a0 100644 --- a/packages/server/src/database/migrations/postgres/index.ts +++ b/packages/server/src/database/migrations/postgres/index.ts @@ -10,6 +10,7 @@ import { AddAssistantEntity1699325775451 } from './1699325775451-AddAssistantEnt import { AddUsedToolsToChatMessage1699481607341 } from './1699481607341-AddUsedToolsToChatMessage' import { AddCategoryToChatFlow1699900910291 } from './1699900910291-AddCategoryToChatFlow' import { AddFileAnnotationsToChatMessage1700271021237 } from './1700271021237-AddFileAnnotationsToChatMessage' +import { AddFileUploadsToChatMessage1701788586491 } from './1701788586491-AddFileUploadsToChatMessage' export const postgresMigrations = [ Init1693891895163, @@ -23,5 +24,6 @@ export const postgresMigrations = [ 
AddAssistantEntity1699325775451, AddUsedToolsToChatMessage1699481607341, AddCategoryToChatFlow1699900910291, - AddFileAnnotationsToChatMessage1700271021237 + AddFileAnnotationsToChatMessage1700271021237, + AddFileUploadsToChatMessage1701788586491 ] diff --git a/packages/server/src/database/migrations/sqlite/1701788586491-AddFileUploadsToChatMessage.ts b/packages/server/src/database/migrations/sqlite/1701788586491-AddFileUploadsToChatMessage.ts new file mode 100644 index 00000000..68e33220 --- /dev/null +++ b/packages/server/src/database/migrations/sqlite/1701788586491-AddFileUploadsToChatMessage.ts @@ -0,0 +1,20 @@ +import { MigrationInterface, QueryRunner } from 'typeorm' + +export class AddFileUploadsToChatMessage1701788586491 implements MigrationInterface { + public async up(queryRunner: QueryRunner): Promise { + await queryRunner.query( + `CREATE TABLE "temp_chat_message" ("id" varchar PRIMARY KEY NOT NULL, "role" varchar NOT NULL, "chatflowid" varchar NOT NULL, "content" text NOT NULL, "sourceDocuments" text, "usedTools" text, "fileAnnotations" text, "fileUploads" text, "createdDate" datetime NOT NULL DEFAULT (datetime('now')), "chatType" VARCHAR NOT NULL DEFAULT 'INTERNAL', "chatId" VARCHAR NOT NULL, "memoryType" VARCHAR, "sessionId" VARCHAR);` + ) + await queryRunner.query( + `INSERT INTO "temp_chat_message" ("id", "role", "chatflowid", "content", "sourceDocuments", "fileAnnotations", "usedTools", "createdDate", "chatType", "chatId", "memoryType", "sessionId") SELECT "id", "role", "chatflowid", "content", "sourceDocuments", "usedTools", "fileAnnotations", "createdDate", "chatType", "chatId", "memoryType", "sessionId" FROM "chat_message";` + ) + await queryRunner.query(`DROP TABLE "chat_message";`) + await queryRunner.query(`ALTER TABLE "temp_chat_message" RENAME TO "chat_message";`) + await queryRunner.query(`CREATE INDEX "IDX_e574527322272fd838f4f0f3d3" ON "chat_message" ("chatflowid") ;`) + } + + public async down(queryRunner: QueryRunner): Promise { + await queryRunner.query(`DROP TABLE IF EXISTS "temp_chat_message";`) + await queryRunner.query(`ALTER TABLE "chat_message" DROP COLUMN "fileUploads";`) + } +} diff --git a/packages/server/src/database/migrations/sqlite/index.ts b/packages/server/src/database/migrations/sqlite/index.ts index fdd83064..bae0cec8 100644 --- a/packages/server/src/database/migrations/sqlite/index.ts +++ b/packages/server/src/database/migrations/sqlite/index.ts @@ -10,6 +10,7 @@ import { AddAssistantEntity1699325775451 } from './1699325775451-AddAssistantEnt import { AddUsedToolsToChatMessage1699481607341 } from './1699481607341-AddUsedToolsToChatMessage' import { AddCategoryToChatFlow1699900910291 } from './1699900910291-AddCategoryToChatFlow' import { AddFileAnnotationsToChatMessage1700271021237 } from './1700271021237-AddFileAnnotationsToChatMessage' +import { AddFileUploadsToChatMessage1701788586491 } from './1701788586491-AddFileUploadsToChatMessage' export const sqliteMigrations = [ Init1693835579790, @@ -23,5 +24,6 @@ export const sqliteMigrations = [ AddAssistantEntity1699325775451, AddUsedToolsToChatMessage1699481607341, AddCategoryToChatFlow1699900910291, - AddFileAnnotationsToChatMessage1700271021237 + AddFileAnnotationsToChatMessage1700271021237, + AddFileUploadsToChatMessage1701788586491 ] diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 9bc3eb3a..195eaf1d 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -410,9 +410,7 @@ export class App { }) if (!chatflow) return 
res.status(404).send(`Chatflow ${req.params.id} not found`) - const obj = { - allowUploads: this.shouldAllowUploads(chatflow) - } + const obj = this.shouldAllowUploads(chatflow) return res.json(obj) }) @@ -1255,16 +1253,30 @@ export class App { } private uploadAllowedNodes = ['OpenAIVisionChain'] - private shouldAllowUploads(result: ChatFlow): boolean { + private shouldAllowUploads(result: ChatFlow): any { const flowObj = JSON.parse(result.flowData) let allowUploads = false + let allowedTypes: string[] = [] + let maxUploadSize: number = -1 flowObj.nodes.forEach((node: IReactFlowNode) => { if (this.uploadAllowedNodes.indexOf(node.data.type) > -1) { logger.debug(`[server]: Found Eligible Node ${node.data.type}, Allowing Uploads.`) allowUploads = true + node.data.inputParams.map((param: any) => { + if (param.name === 'allowedUploadTypes') { + allowedTypes = param.default.split(';') + } + if (param.name === 'maxUploadSize') { + maxUploadSize = parseInt(param.default ? param.default : '0') + } + }) } }) - return allowUploads + return { + allowUploads, + allowedTypes, + maxUploadSize + } } /** @@ -1392,6 +1404,23 @@ export class App { if (!isKeyValidated) return res.status(401).send('Unauthorized') } + if (incomingInput.uploads) { + // @ts-ignore + ;(incomingInput.uploads as any[]).forEach((url: any) => { + if (url.type === 'file') { + const filename = url.name + const bf = url.data + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', filename) + if (!fs.existsSync(path.join(getUserHome(), '.flowise', 'gptvision'))) { + fs.mkdirSync(path.dirname(filePath), { recursive: true }) + } + if (!fs.existsSync(filePath)) fs.writeFileSync(filePath, bf) + fs.unlinkSync(filePath) + url.data = bf.toString('base64') + } + }) + } + let isStreamValid = false const files = (req.files as any[]) || [] @@ -1534,6 +1563,7 @@ export class App { let result = isStreamValid ? await nodeInstance.run(nodeToExecuteData, incomingInput.question, { + uploads: incomingInput.uploads, chatHistory: incomingInput.history, socketIO, socketIOClientId: incomingInput.socketIOClientId, @@ -1544,6 +1574,7 @@ export class App { chatId }) : await nodeInstance.run(nodeToExecuteData, incomingInput.question, { + uploads: incomingInput.uploads, chatHistory: incomingInput.history, logger, appDataSource: this.AppDataSource, @@ -1567,7 +1598,8 @@ export class App { chatId, memoryType, sessionId, - createdDate: userMessageDateTime + createdDate: userMessageDateTime, + fileUploads: incomingInput.uploads ? 
JSON.stringify(incomingInput.uploads) : '' } await this.addChatMessage(userMessage) diff --git a/packages/ui/src/api/chatflows.js b/packages/ui/src/api/chatflows.js index 8810b5a5..c02ca5cd 100644 --- a/packages/ui/src/api/chatflows.js +++ b/packages/ui/src/api/chatflows.js @@ -13,6 +13,7 @@ const updateChatflow = (id, body) => client.put(`/chatflows/${id}`, body) const deleteChatflow = (id) => client.delete(`/chatflows/${id}`) const getIsChatflowStreaming = (id) => client.get(`/chatflows-streaming/${id}`) +const getAllowChatflowUploads = (id) => client.get(`/chatflows-uploads/${id}`) export default { getAllChatflows, @@ -21,5 +22,6 @@ export default { createNewChatflow, updateChatflow, deleteChatflow, - getIsChatflowStreaming + getIsChatflowStreaming, + getAllowChatflowUploads } diff --git a/packages/ui/src/views/chatmessage/ChatMessage.css b/packages/ui/src/views/chatmessage/ChatMessage.css index 2298fee6..f1831d39 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.css +++ b/packages/ui/src/views/chatmessage/ChatMessage.css @@ -144,3 +144,32 @@ justify-content: center; align-items: center; } + +.file-drop-field { + position: relative; /* Needed to position the icon correctly */ + /* Other styling for the field */ +} + +.drop-overlay { + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: rgba(137, 134, 134, 0.83); /* Semi-transparent white */ + display: flex; + flex-direction: column; + justify-content: center; + align-items: center; + z-index: 10; /* Ensure it's above other content */ + border: 2px dashed #0094ff; /* Example style */ +} + +.preview-container { + +} + +.button { + flex: 0 0 auto; /* Don't grow, don't shrink, base width on content */ + margin: 5px; /* Adjust as needed for spacing between buttons */ +} \ No newline at end of file diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index c610f944..0243f252 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -1,4 +1,4 @@ -import { useState, useRef, useEffect, useCallback } from 'react' +import { useCallback, useEffect, useRef, useState } from 'react' import { useSelector } from 'react-redux' import PropTypes from 'prop-types' import socketIOClient from 'socket.io-client' @@ -9,9 +9,23 @@ import remarkGfm from 'remark-gfm' import remarkMath from 'remark-math' import axios from 'axios' -import { CircularProgress, OutlinedInput, Divider, InputAdornment, IconButton, Box, Chip, Button } from '@mui/material' +import { + Box, + Button, + Card, + CardActions, + CardMedia, + Chip, + CircularProgress, + Divider, + Grid, + IconButton, + InputAdornment, + OutlinedInput, + Typography +} from '@mui/material' import { useTheme } from '@mui/material/styles' -import { IconSend, IconDownload } from '@tabler/icons' +import { IconDownload, IconSend, IconUpload } from '@tabler/icons' // project import import { CodeBlock } from 'ui-component/markdown/CodeBlock' @@ -33,6 +47,7 @@ import { baseURL, maxScroll } from 'store/constant' import robotPNG from 'assets/images/robot.png' import userPNG from 'assets/images/account.png' import { isValidURL, removeDuplicateURL, setLocalStorageChatflow } from 'utils/genericHelper' +import DeleteIcon from '@mui/icons-material/Delete' export const ChatMessage = ({ open, chatflowid, isDialog }) => { const theme = useTheme() @@ -58,6 +73,185 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { const getChatmessageApi = 
useApi(chatmessageApi.getInternalChatmessageFromChatflow) const getIsChatflowStreamingApi = useApi(chatflowsApi.getIsChatflowStreaming) + const fileUploadRef = useRef(null) + const getAllowChatFlowUploads = useApi(chatflowsApi.getAllowChatflowUploads) + const [isChatFlowAvailableForUploads, setIsChatFlowAvailableForUploads] = useState(false) + const [previews, setPreviews] = useState([]) + const [isDragOver, setIsDragOver] = useState(false) + const handleDragOver = (e) => { + if (!isChatFlowAvailableForUploads) { + return + } + e.preventDefault() + } + const isFileAllowedForUpload = (file) => { + // check if file type is allowed + if (getAllowChatFlowUploads.data?.allowedTypes?.length > 0) { + const allowedFileTypes = getAllowChatFlowUploads.data?.allowedTypes + if (!allowedFileTypes.includes(file.type)) { + alert(`File ${file.name} is not allowed.\nAllowed file types are ${allowedFileTypes.join(', ')}.`) + return false + } + } + // check if file size is allowed + if (getAllowChatFlowUploads.data?.maxUploadSize > 0) { + const sizeInMB = file.size / 1024 / 1024 + if (sizeInMB > getAllowChatFlowUploads.data?.maxUploadSize) { + alert(`File ${file.name} is too large.\nMaximum allowed size is ${getAllowChatFlowUploads.data?.maxUploadSize} MB.`) + return false + } + } + return true + } + const handleDrop = async (e) => { + if (!isChatFlowAvailableForUploads) { + return + } + e.preventDefault() + setIsDragOver(false) + let files = [] + if (e.dataTransfer.files.length > 0) { + for (const file of e.dataTransfer.files) { + if (isFileAllowedForUpload(file) === false) { + return + } + const reader = new FileReader() + const { name } = file + files.push( + new Promise((resolve) => { + reader.onload = (evt) => { + if (!evt?.target?.result) { + return + } + const { result } = evt.target + resolve({ + data: result, + preview: URL.createObjectURL(file), + type: 'file', + name: name + }) + } + reader.readAsDataURL(file) + }) + ) + } + + const newFiles = await Promise.all(files) + setPreviews((prevPreviews) => [...prevPreviews, ...newFiles]) + } + if (e.dataTransfer.items) { + const newUploads = [] + for (const item of e.dataTransfer.items) { + if (item.kind === 'string' && item.type.match('^text/uri-list')) { + item.getAsString((s) => { + let upload = { + data: s, + preview: s, + type: 'url', + name: s.substring(s.lastIndexOf('/') + 1) + } + setPreviews((prevPreviews) => [...prevPreviews, upload]) + }) + } else if (item.kind === 'string' && item.type.match('^text/html')) { + item.getAsString((s) => { + if (s.indexOf('href') === -1) return + //extract href + let start = s.substring(s.indexOf('href') + 6) + let hrefStr = start.substring(0, start.indexOf('"')) + + let upload = { + data: hrefStr, + preview: hrefStr, + type: 'url', + name: hrefStr.substring(hrefStr.lastIndexOf('/') + 1) + } + setPreviews((prevPreviews) => [...prevPreviews, upload]) + }) + } + } + } + } + const handleFileChange = async (event) => { + const fileObj = event.target.files && event.target.files[0] + if (!fileObj) { + return + } + let files = [] + for (const file of event.target.files) { + if (isFileAllowedForUpload(file) === false) { + return + } + const reader = new FileReader() + const { name } = file + files.push( + new Promise((resolve) => { + reader.onload = (evt) => { + if (!evt?.target?.result) { + return + } + const { result } = evt.target + resolve({ + data: result, + preview: URL.createObjectURL(file), + type: 'file', + name: name + }) + } + reader.readAsDataURL(file) + }) + ) + } + + const newFiles = await 
Promise.all(files) + setPreviews((prevPreviews) => [...prevPreviews, ...newFiles]) + // 👇️ reset file input + event.target.value = null + } + + const handleDragEnter = (e) => { + if (isChatFlowAvailableForUploads) { + e.preventDefault() + setIsDragOver(true) + } + } + + const handleDragLeave = (e) => { + if (isChatFlowAvailableForUploads) { + e.preventDefault() + if (e.originalEvent?.pageX !== 0 || e.originalEvent?.pageY !== 0) { + return false + } + setIsDragOver(false) // Set the drag over state to false when the drag leaves + } + } + const handleDeletePreview = (itemToDelete) => { + if (itemToDelete.type === 'file') { + URL.revokeObjectURL(itemToDelete.preview) // Clean up for file + } + setPreviews(previews.filter((item) => item !== itemToDelete)) + } + const handleUploadClick = () => { + // 👇️ open file input box on click of another element + fileUploadRef.current.click() + } + + const previewStyle = { + width: '64px', + height: '64px', + objectFit: 'cover' // This makes the image cover the area, cropping it if necessary + } + const messageImageStyle = { + width: '128px', + height: '128px', + objectFit: 'cover' // This makes the image cover the area, cropping it if necessary + } + + const clearPreviews = () => { + // Revoke the data uris to avoid memory leaks + previews.forEach((file) => URL.revokeObjectURL(file.preview)) + setPreviews([]) + } + const onSourceDialogClick = (data, title) => { setSourceDialogProps({ data, title }) setSourceDialogOpen(true) @@ -113,7 +307,16 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } setLoading(true) - setMessages((prevMessages) => [...prevMessages, { message: userInput, type: 'userMessage' }]) + const urls = [] + previews.map((item) => { + urls.push({ + data: item.data, + type: item.type, + name: item.name + }) + }) + clearPreviews() + setMessages((prevMessages) => [...prevMessages, { message: userInput, type: 'userMessage', fileUploads: urls }]) // Send user question and history to API try { @@ -122,6 +325,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { history: messages.filter((msg) => msg.message !== 'Hi there! How can I help?'), chatId } + if (urls) params.uploads = urls if (isChatFlowAvailableToStream) params.socketIOClientId = socketIOClientId const response = await predictionApi.sendMessageAndGetPrediction(chatflowid, params) @@ -209,6 +413,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { if (message.sourceDocuments) obj.sourceDocuments = JSON.parse(message.sourceDocuments) if (message.usedTools) obj.usedTools = JSON.parse(message.usedTools) if (message.fileAnnotations) obj.fileAnnotations = JSON.parse(message.fileAnnotations) + if (message.fileUploads) obj.fileUploads = JSON.parse(message.fileUploads) return obj }) setMessages((prevMessages) => [...prevMessages, ...loadedMessages]) @@ -227,6 +432,14 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { // eslint-disable-next-line react-hooks/exhaustive-deps }, [getIsChatflowStreamingApi.data]) + // Get chatflow uploads capability + useEffect(() => { + if (getAllowChatFlowUploads.data) { + setIsChatFlowAvailableForUploads(getAllowChatFlowUploads.data?.allowUploads ?? 
false) + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [getAllowChatFlowUploads.data]) + // Auto scroll chat to bottom useEffect(() => { scrollToBottom() @@ -245,6 +458,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { if (open && chatflowid) { getChatmessageApi.request(chatflowid) getIsChatflowStreamingApi.request(chatflowid) + getAllowChatFlowUploads.request(chatflowid) scrollToBottom() socket = socketIOClient(baseURL) @@ -281,9 +495,22 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { }, [open, chatflowid]) return ( - <> -
-
+
+ {isDragOver && ( + + Drop here to upload + {getAllowChatFlowUploads.data?.allowedTypes?.join(', ')} + Max Allowed Size: {getAllowChatFlowUploads.data?.maxUploadSize} MB + + )} +
+
{messages && messages.map((message, index) => { return ( @@ -375,6 +602,20 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { })}
)} + {message.fileUploads && + message.fileUploads.map((item, index) => { + return ( + + + + ) + })} {message.sourceDocuments && (
{removeDuplicateURL(message).map((source, index) => { @@ -430,6 +671,22 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { onChange={onChange} multiline={true} maxRows={isDialog ? 7 : 2} + startAdornment={ + isChatFlowAvailableForUploads && ( + + + + + + ) + } endAdornment={ @@ -447,11 +704,39 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } /> + {isChatFlowAvailableForUploads && ( + + )}
+ {previews && previews.length > 0 && ( + + {previews.map((item, index) => ( + + + + +
) } From b492153f8a6f4ec642e5c0efabb38a80908171a2 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 7 Dec 2023 14:26:17 +0530 Subject: [PATCH 03/62] GPT Vision: Storing filenames only in chat message --- .../nodes/chains/VisionChain/VLLMChain.ts | 13 +++- packages/server/src/index.ts | 15 +++-- .../ui/src/views/chatmessage/ChatMessage.js | 65 ++++++++++--------- 3 files changed, 57 insertions(+), 36 deletions(-) diff --git a/packages/components/nodes/chains/VisionChain/VLLMChain.ts b/packages/components/nodes/chains/VisionChain/VLLMChain.ts index 59a2483a..f9b92e53 100644 --- a/packages/components/nodes/chains/VisionChain/VLLMChain.ts +++ b/packages/components/nodes/chains/VisionChain/VLLMChain.ts @@ -3,6 +3,9 @@ import { BaseChain, ChainInputs } from 'langchain/chains' import { ChainValues } from 'langchain/schema' import { BasePromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts' import { ChatOpenAI } from 'langchain/chat_models/openai' +import path from 'path' +import { getUserHome } from '../../../src/utils' +import fs from 'fs' /** * Interface for the input parameters of the OpenAIVisionChain class. @@ -89,10 +92,18 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { }) if (this.imageUrls && this.imageUrls.length > 0) { this.imageUrls.forEach((imageUrl: any) => { + let bf = imageUrl?.data + if (imageUrl.type == 'stored-file') { + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', imageUrl.data) + + // as the image is stored in the server, read the file and convert it to base64 + const contents = fs.readFileSync(filePath) + bf = 'data:' + imageUrl.mime + ';base64,' + contents.toString('base64') + } userRole.content.push({ type: 'image_url', image_url: { - url: imageUrl?.data, + url: bf, detail: this.imageResolution } }) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 70978c6a..83b018d5 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1349,17 +1349,20 @@ export class App { if (incomingInput.uploads) { // @ts-ignore - ;(incomingInput.uploads as any[]).forEach((url: any) => { - if (url.type === 'file') { - const filename = url.name - const bf = url.data + ;(incomingInput.uploads as any[]).forEach((upload: any) => { + if (upload.type === 'file') { + const filename = upload.name const filePath = path.join(getUserHome(), '.flowise', 'gptvision', filename) if (!fs.existsSync(path.join(getUserHome(), '.flowise', 'gptvision'))) { fs.mkdirSync(path.dirname(filePath), { recursive: true }) } + const splitDataURI = upload.data.split(',') + //const fname = splitDataURI.pop()?.split(':')[1] ?? 
'' + const bf = Buffer.from(splitDataURI.pop() || '', 'base64') if (!fs.existsSync(filePath)) fs.writeFileSync(filePath, bf) - fs.unlinkSync(filePath) - url.data = bf.toString('base64') + // don't need to store the file contents in chatmessage, just the filename + upload.data = filename //bf.toString('base64') + upload.type = 'stored-file' } }) } diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 0243f252..92c73699 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -128,7 +128,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { data: result, preview: URL.createObjectURL(file), type: 'file', - name: name + name: name, + mime: file.type }) } reader.readAsDataURL(file) @@ -138,9 +139,11 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { const newFiles = await Promise.all(files) setPreviews((prevPreviews) => [...prevPreviews, ...newFiles]) + // if (newFiles.length > 0) { + // document.getElementById('messagelist').style.height = '80%' + // } } if (e.dataTransfer.items) { - const newUploads = [] for (const item of e.dataTransfer.items) { if (item.kind === 'string' && item.type.match('^text/uri-list')) { item.getAsString((s) => { @@ -194,7 +197,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { data: result, preview: URL.createObjectURL(file), type: 'file', - name: name + name: name, + mime: file.type }) } reader.readAsDataURL(file) @@ -312,7 +316,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { urls.push({ data: item.data, type: item.type, - name: item.name + name: item.name, + mime: item.mime }) }) clearPreviews() @@ -510,7 +515,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { )}
-
+
{messages && messages.map((message, index) => { return ( @@ -710,31 +715,33 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
- {previews && previews.length > 0 && ( - - {previews.map((item, index) => ( - - - - -
setSourceDialogOpen(false)} />
) From 68fbe0ea12c89632fa50fc1f58c80e352edc0dc8 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 7 Dec 2023 22:32:07 +0530 Subject: [PATCH 04/62] GPT Vision: Vision Chain Node update along with addition of chatid folder on the server side when saving messages. --- .../chains/VisionChain/OpenAIVisionChain.ts | 77 +++++++++++++++---- .../nodes/chains/VisionChain/VLLMChain.ts | 32 ++++---- packages/server/src/index.ts | 36 ++++++--- .../ui/src/views/chatmessage/ChatMessage.js | 9 ++- 4 files changed, 112 insertions(+), 42 deletions(-) diff --git a/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts b/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts index 7745f05d..6d19235c 100644 --- a/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts +++ b/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts @@ -1,10 +1,8 @@ import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' -import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' -import { VLLMChain } from './VLLMChain' -import { BaseLanguageModel } from 'langchain/base_language' +import { getBaseClasses, getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src/utils' +import { OpenAIVisionChainInput, VLLMChain } from './VLLMChain' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { formatResponse } from '../../outputparsers/OutputParserHelpers' -import { ChatOpenAI } from 'langchain/chat_models/openai' class OpenAIVisionChain_Chains implements INode { label: string @@ -18,6 +16,7 @@ class OpenAIVisionChain_Chains implements INode { description: string inputs: INodeParams[] outputs: INodeOutputsValue[] + credential: INodeParams constructor() { this.label = 'Open AI Vision Chain' @@ -26,14 +25,28 @@ class OpenAIVisionChain_Chains implements INode { this.type = 'OpenAIVisionChain' this.icon = 'chain.svg' this.category = 'Chains' - this.badge = 'EXPERIMENTAL' + this.badge = 'BETA' this.description = 'Chain to run queries against OpenAI (GPT-4) Vision .' 
this.baseClasses = [this.type, ...getBaseClasses(VLLMChain)] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } this.inputs = [ { - label: 'Language Model (Works only with Open AI [gpt-4-vision-preview])', - name: 'model', - type: 'BaseLanguageModel' + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'gpt-4-vision-preview', + name: 'gpt-4-vision-preview' + } + ], + default: 'gpt-4-vision-preview', + optional: true }, { label: 'Prompt', @@ -57,7 +70,33 @@ class OpenAIVisionChain_Chains implements INode { } ], default: 'low', - optional: false + optional: false, + additionalParams: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true }, { label: 'Chain Name', @@ -96,22 +135,26 @@ class OpenAIVisionChain_Chains implements INode { } async init(nodeData: INodeData, input: string, options: ICommonObject): Promise { - const model = nodeData.inputs?.model as BaseLanguageModel const prompt = nodeData.inputs?.prompt const output = nodeData.outputs?.output as string const imageResolution = nodeData.inputs?.imageResolution const promptValues = prompt.promptValues as ICommonObject - if (!(model as any).openAIApiKey || (model as any).modelName !== 'gpt-4-vision-preview') { - throw new Error('Chain works with OpenAI Vision model only') - } - const openAIModel = model as ChatOpenAI - const fields = { - openAIApiKey: openAIModel.openAIApiKey, + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as string + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const fields: OpenAIVisionChainInput = { + openAIApiKey: openAIApiKey, imageResolution: imageResolution, verbose: process.env.DEBUG === 'true', imageUrls: options.uploads, - openAIModel: openAIModel + modelName: modelName } + if (temperature) fields.temperature = parseFloat(temperature) + if (maxTokens) fields.maxTokens = parseInt(maxTokens, 10) + if (topP) fields.topP = parseFloat(topP) if (output === this.name) { const chain = new VLLMChain({ ...fields, diff --git a/packages/components/nodes/chains/VisionChain/VLLMChain.ts b/packages/components/nodes/chains/VisionChain/VLLMChain.ts index f9b92e53..2849cf63 100644 --- a/packages/components/nodes/chains/VisionChain/VLLMChain.ts +++ b/packages/components/nodes/chains/VisionChain/VLLMChain.ts @@ -2,7 +2,6 @@ import { OpenAI as OpenAIClient, ClientOptions } from 'openai' import { BaseChain, ChainInputs } from 'langchain/chains' import { ChainValues } from 'langchain/schema' import { BasePromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts' -import { ChatOpenAI } from 'langchain/chat_models/openai' import path from 'path' import { getUserHome } from '../../../src/utils' import fs from 'fs' @@ -18,7 +17,10 @@ export interface OpenAIVisionChainInput extends ChainInputs { configuration?: ClientOptions imageUrls?: [] imageResolution?: string - openAIModel: ChatOpenAI + temperature?: number + modelName?: string + maxTokens?: number + topP?: number } /** @@ -30,12 +32,6 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { static lc_name() { return 'VLLMChain' } - - get lc_secrets(): { [key: string]: string } | undefined { - return { - openAIApiKey: 'OPENAI_API_KEY' - } - } prompt: BasePromptTemplate | undefined inputKey = 'input' @@ -44,10 +40,13 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { imageResolution: string = 'low' openAIApiKey?: string openAIOrganization?: string - openAIModel: ChatOpenAI clientConfig: ClientOptions client: OpenAIClient throwError: boolean + temperature?: number + modelName?: string + maxTokens?: number + topP?: number constructor(fields: OpenAIVisionChainInput) { super(fields) @@ -55,13 +54,16 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { this.imageResolution = fields?.imageResolution ?? 'low' this.openAIApiKey = fields?.openAIApiKey this.prompt = fields?.prompt + this.temperature = fields?.temperature + this.modelName = fields?.modelName + this.maxTokens = fields?.maxTokens + this.topP = fields?.topP this.imageUrls = fields?.imageUrls ?? 
[] if (!this.openAIApiKey) { throw new Error('OpenAI API key not found') } this.openAIOrganization = fields?.openAIOrganization - this.openAIModel = fields.openAIModel this.clientConfig = { ...fields?.configuration, @@ -76,12 +78,12 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { const userInput = values[this.inputKey] const vRequest: any = { - model: 'gpt-4-vision-preview', - temperature: this.openAIModel.temperature, - top_p: this.openAIModel.topP, + model: this.modelName, + temperature: this.temperature, + top_p: this.topP, messages: [] } - if (this.openAIModel.maxTokens) vRequest.max_tokens = this.openAIModel.maxTokens + if (this.maxTokens) vRequest.max_tokens = this.maxTokens else vRequest.max_tokens = 1024 const userRole: any = { role: 'user' } @@ -94,7 +96,7 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { this.imageUrls.forEach((imageUrl: any) => { let bf = imageUrl?.data if (imageUrl.type == 'stored-file') { - const filePath = path.join(getUserHome(), '.flowise', 'gptvision', imageUrl.data) + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', imageUrl.data, imageUrl.name) // as the image is stored in the server, read the file and convert it to base64 const contents = fs.readFileSync(filePath) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 83b018d5..84e76c6e 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -972,12 +972,29 @@ export class App { } }) + function streamFileToUser(res: Response, filePath: string) { + const fileStream = fs.createReadStream(filePath) + fileStream.pipe(res) + } + // Download file from assistant this.app.post('/api/v1/openai-assistants-file', async (req: Request, res: Response) => { const filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', req.body.fileName) res.setHeader('Content-Disposition', 'attachment; filename=' + path.basename(filePath)) - const fileStream = fs.createReadStream(filePath) - fileStream.pipe(res) + streamFileToUser(res, filePath) + }) + + // stream uploaded image + this.app.get('/api/v1/get-upload-file/:id', async (req: Request, res: Response) => { + if (!req.params.id || !req.query.chatId) { + return res.status(500).send(`Invalid file path`) + } + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', req.query.chatId as string, req.params.id) + console.log(filePath) + if (!path.isAbsolute(filePath) || !fs.existsSync(filePath)) { + return res.status(500).send(`Invalid file path`) + } + streamFileToUser(res, filePath) }) // ---------------------------------------- @@ -1352,16 +1369,17 @@ export class App { ;(incomingInput.uploads as any[]).forEach((upload: any) => { if (upload.type === 'file') { const filename = upload.name - const filePath = path.join(getUserHome(), '.flowise', 'gptvision', filename) - if (!fs.existsSync(path.join(getUserHome(), '.flowise', 'gptvision'))) { - fs.mkdirSync(path.dirname(filePath), { recursive: true }) + const dir = path.join(getUserHome(), '.flowise', 'gptvision', chatId) + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }) } + const filePath = path.join(dir, filename) const splitDataURI = upload.data.split(',') - //const fname = splitDataURI.pop()?.split(':')[1] ?? 
'' const bf = Buffer.from(splitDataURI.pop() || '', 'base64') - if (!fs.existsSync(filePath)) fs.writeFileSync(filePath, bf) - // don't need to store the file contents in chatmessage, just the filename - upload.data = filename //bf.toString('base64') + //TODO: check if file exists, what should we do if it exists? + fs.writeFileSync(filePath, bf) + // don't need to store the file contents in chatmessage, just the filename and chatId + upload.data = chatId upload.type = 'stored-file' } }) diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 92c73699..d2ff51d8 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -418,7 +418,14 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { if (message.sourceDocuments) obj.sourceDocuments = JSON.parse(message.sourceDocuments) if (message.usedTools) obj.usedTools = JSON.parse(message.usedTools) if (message.fileAnnotations) obj.fileAnnotations = JSON.parse(message.fileAnnotations) - if (message.fileUploads) obj.fileUploads = JSON.parse(message.fileUploads) + if (message.fileUploads) { + obj.fileUploads = JSON.parse(message.fileUploads) + obj.fileUploads.forEach((file) => { + if (file.type === 'stored-file') { + file.data = `${baseURL}/api/v1/get-upload-file/${file.name}?chatId=${chatId}` + } + }) + } return obj }) setMessages((prevMessages) => [...prevMessages, ...loadedMessages]) From 32575828cdf49b976480912b50d9391477e7bed1 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Fri, 8 Dec 2023 17:21:53 +0530 Subject: [PATCH 05/62] GPT Vision: Converting vision into Multi Modal. Base Changes. --- .../nodes/multimodal/OpenAI/AudioWhisper.ts | 61 ++++++++++++++++++ .../OpenAI}/OpenAIVisionChain.ts | 41 +++++++----- .../OpenAI}/VLLMChain.ts | 0 .../nodes/multimodal/OpenAI/audio.svg | 1 + .../OpenAI}/chain.svg | 0 .../nodes/multimodal/OpenAI/list.png | Bin 0 -> 5002 bytes packages/server/src/index.ts | 16 +++-- packages/ui/src/assets/images/wave-sound.jpg | Bin 0 -> 330686 bytes .../ui/src/views/chatmessage/ChatMessage.js | 55 +++++++++------- 9 files changed, 129 insertions(+), 45 deletions(-) create mode 100644 packages/components/nodes/multimodal/OpenAI/AudioWhisper.ts rename packages/components/nodes/{chains/VisionChain => multimodal/OpenAI}/OpenAIVisionChain.ts (94%) rename packages/components/nodes/{chains/VisionChain => multimodal/OpenAI}/VLLMChain.ts (100%) create mode 100644 packages/components/nodes/multimodal/OpenAI/audio.svg rename packages/components/nodes/{chains/VisionChain => multimodal/OpenAI}/chain.svg (100%) create mode 100644 packages/components/nodes/multimodal/OpenAI/list.png create mode 100644 packages/ui/src/assets/images/wave-sound.jpg diff --git a/packages/components/nodes/multimodal/OpenAI/AudioWhisper.ts b/packages/components/nodes/multimodal/OpenAI/AudioWhisper.ts new file mode 100644 index 00000000..b308a7c5 --- /dev/null +++ b/packages/components/nodes/multimodal/OpenAI/AudioWhisper.ts @@ -0,0 +1,61 @@ +import { INode, INodeData, INodeParams } from '../../../src' + +class OpenAIAudioWhisper implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'Open AI Whisper' + this.name = 'openAIAudioWhisper' + this.version = 1.0 + this.type = 'OpenAIWhisper' + this.description = 'Speech to text using OpenAI Whisper API' + this.icon = 
'audio.svg' + this.category = 'MultiModal' + this.baseClasses = [this.type] + this.inputs = [ + { + label: 'Purpose', + name: 'purpose', + type: 'options', + options: [ + { + label: 'transcription', + name: 'transcription' + }, + { + label: 'translation', + name: 'translation' + } + ] + }, + { + label: 'Accepted Upload Types', + name: 'allowedUploadTypes', + type: 'string', + default: 'audio/mpeg;audio/x-wav;audio/mp4', + hidden: true + }, + { + label: 'Maximum Upload Size (MB)', + name: 'maxUploadSize', + type: 'number', + default: '5', + hidden: true + } + ] + } + + async init(nodeData: INodeData): Promise { + return {} + } +} + +module.exports = { nodeClass: OpenAIAudioWhisper } diff --git a/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts b/packages/components/nodes/multimodal/OpenAI/OpenAIVisionChain.ts similarity index 94% rename from packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts rename to packages/components/nodes/multimodal/OpenAI/OpenAIVisionChain.ts index 6d19235c..4151b4b0 100644 --- a/packages/components/nodes/chains/VisionChain/OpenAIVisionChain.ts +++ b/packages/components/nodes/multimodal/OpenAI/OpenAIVisionChain.ts @@ -19,14 +19,14 @@ class OpenAIVisionChain_Chains implements INode { credential: INodeParams constructor() { - this.label = 'Open AI Vision Chain' - this.name = 'openAIVisionChain' + this.label = 'Open AI MultiModal Chain' + this.name = 'openAIMultiModalChain' this.version = 1.0 - this.type = 'OpenAIVisionChain' + this.type = 'OpenAIMultiModalChain' this.icon = 'chain.svg' this.category = 'Chains' this.badge = 'BETA' - this.description = 'Chain to run queries against OpenAI (GPT-4) Vision .' + this.description = 'Chain to query against Image and Audio Input.' this.baseClasses = [this.type, ...getBaseClasses(VLLMChain)] this.credential = { label: 'Connect Credential', @@ -36,16 +36,9 @@ class OpenAIVisionChain_Chains implements INode { } this.inputs = [ { - label: 'Model Name', - name: 'modelName', - type: 'options', - options: [ - { - label: 'gpt-4-vision-preview', - name: 'gpt-4-vision-preview' - } - ], - default: 'gpt-4-vision-preview', + label: 'Audio Input', + name: 'audioInput', + type: 'OpenAIWhisper', optional: true }, { @@ -54,6 +47,22 @@ class OpenAIVisionChain_Chains implements INode { type: 'BasePromptTemplate', optional: true }, + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'gpt-4-vision-preview', + name: 'gpt-4-vision-preview' + }, + { + label: 'whisper-1', + name: 'whisper-1' + } + ], + default: 'gpt-4-vision-preview' + }, { label: 'Image Resolution', description: 'This parameter controls the resolution in which the model views the image.', @@ -122,8 +131,8 @@ class OpenAIVisionChain_Chains implements INode { ] this.outputs = [ { - label: 'Open AI Vision Chain', - name: 'openAIVisionChain', + label: 'Open AI MultiModal Chain', + name: 'OpenAIMultiModalChain', baseClasses: [this.type, ...getBaseClasses(VLLMChain)] }, { diff --git a/packages/components/nodes/chains/VisionChain/VLLMChain.ts b/packages/components/nodes/multimodal/OpenAI/VLLMChain.ts similarity index 100% rename from packages/components/nodes/chains/VisionChain/VLLMChain.ts rename to packages/components/nodes/multimodal/OpenAI/VLLMChain.ts diff --git a/packages/components/nodes/multimodal/OpenAI/audio.svg b/packages/components/nodes/multimodal/OpenAI/audio.svg new file mode 100644 index 00000000..3bcbbdcd --- /dev/null +++ b/packages/components/nodes/multimodal/OpenAI/audio.svg @@ -0,0 +1 @@ + \ No 
newline at end of file
diff --git a/packages/components/nodes/chains/VisionChain/chain.svg b/packages/components/nodes/multimodal/OpenAI/chain.svg
similarity index 100%
rename from packages/components/nodes/chains/VisionChain/chain.svg
rename to packages/components/nodes/multimodal/OpenAI/chain.svg
diff --git a/packages/components/nodes/multimodal/OpenAI/list.png b/packages/components/nodes/multimodal/OpenAI/list.png
new file mode 100644
index 0000000000000000000000000000000000000000..acb4e5d68f200207a97e10ee63125eb4e040fcec
GIT binary patch
literal 5002
[5,002 bytes of binary image data omitted]

diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts
if (this.uploadAllowedNodes.indexOf(node.data.type) > -1) {
logger.debug(`[server]: Found Eligible Node ${node.data.type}, Allowing Uploads.`)
allowUploads = true
+ const allowance: any = {}
node.data.inputParams.map((param: any) => {
if (param.name === 'allowedUploadTypes') {
- allowedTypes = param.default.split(';')
+ allowance.allowedTypes = param.default.split(';')
}
if (param.name === 'maxUploadSize') {
- maxUploadSize = parseInt(param.default ? param.default : '0')
+ allowance.maxUploadSize = parseInt(param.default ? param.default : '0')
}
})
+ if (allowance.allowedTypes && allowance.maxUploadSize) {
+ allowances.push(allowance)
+ }
}
})
return {
allowUploads,
- allowedTypes,
- maxUploadSize
+ allowed: allowances
}
}
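With this change the uploads-allowed check no longer reports a single allowedTypes/maxUploadSize pair; it aggregates one allowance entry per eligible node. A minimal sketch of the resulting shape and of a client-side check against it (the audio entry mirrors the AudioWhisper defaults above; the image entry and the helper name are illustrative, not part of the patch):

// Sketch only: aggregated upload constraints after this change.
const constraints = {
    allowUploads: true,
    allowed: [
        { allowedTypes: ['audio/mpeg', 'audio/x-wav', 'audio/mp4'], maxUploadSize: 5 }, // AudioWhisper defaults
        { allowedTypes: ['image/png', 'image/jpeg'], maxUploadSize: 5 } // hypothetical image-capable node
    ]
}

// A file is acceptable if any eligible node allows both its MIME type and its size.
const isAllowed = (file) =>
    constraints.allowUploads &&
    constraints.allowed.some((a) => a.allowedTypes.includes(file.type) && file.size / 1024 / 1024 <= a.maxUploadSize)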
znM$I72<3!J%LzV<*6TJQqFT*>x!2`Uz?_35nsS75GzzIgvz_8fCM?FLQ^Aa}Or_(B zQOzT4&gru-5}XL(Vc=`DIzmT*A$c+H=y@Bg4BNDd4N93YG0|peCs&|pk*=Tu&({L( zEF7%hZVb%-QA(^@Jwb}I;7PkR9>&xb@Z3m7b+d_TTyy#9o`dzfV`2b#b*l{QBPus{K0(`ng}h!s%QT?>Fc7iYxeCjCw@#OQn`m@^gZ=}f7OltZRK zB3CcFivgz(%(`;7QK~A#Yq$d1$&|=?5)Kz>V*ET8E`}lqSo@V@Jd6hNY#bw%KuD>< zA%D##mfcyFuUGjTQ@7H_d`hXp`HZ8`RTvWDnGl?^0GD5KvSzpjYn~#Ub@7O1^^v{= z;%17$GEufdm7tXL#+ucvRN{;#+|Cz+GEsF34Y6JoOX+ah(pAW%k-X2svxO94w`MEh zSSIUt7dbE&@Ms!y`d}NC%DW^RY*H|cwY5V&tI;V51X+YNr_qK7Js2Ofs1Z2K^_C*Q zRh9T6)v#2AaEb@rz?i5j9$1!GxN3@kRdFm{AUVnGOUb4%6^L1c5LnM=Y7!zTL zOjII1iA)*8(VRuJ$^poga^UtT>7YHyW=M-0&E_f|l|x)_UT!efhNu|psv$@$5YWOucpiWZ4RGI%8bhf_UolWR(nuii?fc)FEL(kg8V29=7^^xBI7+V?+$a znHnByn9U6&WJ$KtM!RgynabIMt6uQMAa^j%AhJx!VueO5urDlvo=fmzz9Mu@Mi|M{ zW|~d8g+{A!qr}#OVjy721Fx1t3jr&c=LD*n2m!BDHhLmnN~-y>RNhB;`9i_1X7Xw^ zzU@y};ZHwzl2|HlZ{$*T#7-t^7-1y zV}h#Hf?PS1s2WR28=4b1StVL(!c}gitTCf%&50@8<_l&C+>IgDOakJ%`hg)Z0nj_#{;k*h1#)CPZ}=iY~K@tJbp(B+R*ER1wRY6cgKw zG-$TW(UE{V;U;l89!sW)lDAPP(yU-kx(bjiTNS%8U2g|8%?a0_YO;!T6=Er0jVy?e zIi9mf4k=5yjdoXpF8C}q#tt2EmFRi4<#mAfJoW@Q7O_vr10J-DW9otU9wXCsTDr zTzpML5W?6t+x%$7CRa_to`Xd)MS-=8!HiZ37aTYvc-S(McgpXiQBivH8M9uGts&11n&5C@x z(F_N9y3t}HwCeSdo{ZZUk2dUESM1s~L86ieh zV_cM~GQo(Wk|+IGxlY%ss0tQhG>5Q8r~uV{Xm5R(Efn)$(k;3@F`0uselKV*3=+~M zF>Ga-pdHLTiIkt@NIy-vH7nnWaxRZb!EFx~W`p4>%+~yDEa*3d#CEraEUu_A$r21^ z!d!!iMp{i@JP>c=>5P+d)-9=okA_WYPAC)2M6u+>Sb+^PwuU7b4vP)V>*Jz%vr8hJ z#b!RzRanhbI9r~z0d$80{*o!xEMSZH$z} zViL=0CQ8xD6@k&>E?1T08&SHcW*DknDY0aLZ1L_2*oF}?Hl;=(57!QOIb*^DTdY;J z0Cg3{$$DI979wK89c1mE1l4kAB2ff4PDxXnv9*0=9xet-vX#Xx{sz*9q(o4KWp|M_ z<^47hs(7+k9JPs(vDx#7bgTl_GhCwNpnc__x0dD{7)x8@Mn7q#8E{2bBQeodmEC4P zEaG@BN*7}8crmPSu}lOGM680Tjyk!hIl%O``YoWHE~M2Xja>o|K__)R!(6 zy9^bLOcrERDElGSUBE+ihw6%ly)r9!{Hjt7xLpbzV0|%gSFT-g#q4HZCCSHtbB$K` zc*G(kaYwY+fGkK7?p@KhWVBHV(ct2-;uLw@7LNz&OpvOXWx=AAFr<*j9g$X`rFv=D zZ^iv8g$6^-W<2dJ2{N6`)k&4{htO0DbcAjVOI{yV$g_0IUQMJ`fdUVwIP;ZIGajj{ zArG$k0&JqhL_{_Qr4#mw>d#mxm2e40e^sd0TZj!T4k5T6q1^4B`$}tK1vi83jZ`^Z zf_R>*S?X~{mi%TRhmc6A(vmted!rBrTL}pVR@3N^YWAv#)}+#EkV~}^wyNky>t&=j zN!3)7JDy?E!3yVe$H1dAEn~nNOVpHtGm=ZkvpI*0s;eHd-sY?3P_Yzf8eL*EU*XV7 zmg6G0S1Z&!*=i}(&TPHbGZPMn1ReouHXTJqW=llL(`d%&WJ|+@BtvRmcg|^)QtmX# z@oLNuR%JE@T%B?<*z1nQ&`N==R4VQm;kWm^4Z#sas*>{sy-)=l55@#o;$nexI+m?5 zC9LFYda9ZetycYxdLmnfnsF#3S>gl^+2UoFlq?Z7i{H=Kz2YD&lCF(qTSs1G!_7n%6HCO8#7h%X+|~+6sC6 z8bdc*B*f=)P%4%Nsz|)S#}nZ+QkBhW*wNcrYC&;)C3qIC6q)o zwC&|YYrEX^dcvqDZzN)+u$imY%qajf?#Piwip|<-+GT9j8ohbh(?C4_dN3)Of(&Fs z!3Mia^qU|&>UKK0s^G4rT9uZIrWM#o2K_NSDuHL-6!6?YvQUqjLJCwXh^PSj&DCVC z%dm*C*>oz8g02i9-ax5r_Q%bLk&KyRJ`M{(lC8ysV<{|56;uSrQ~5R?L{oCf3}+cR zmxt|%N+yBW{GyiX4Lt>iF|UcLBy0sC+%DKMb+8}a@>e7Fgg?&avmQ9kp%INGxNO!6 z8Yh4|g>(~(<{-$TwqQ^ftzB_D+Zi0`T}gr4YA9F$)&0e&sOBr}R<2Y~SwU&xNL)j$ zEzV-$;t6NV!KNc=7`Vy+4%YSr&SY%ysKr+260IE0Vlp1|_e_o>0jdo=TcuJ8P)#f0 zsJV!uT%Z_%0krYBY7vvbg}%EkrISj-nsm4vsYtU`?0-R@RZU??;ni-LttD-aB1I9t;aEmfJi*ycS#nejz>D~C)rRCY+36-x#J4l3u8 zi55mljwF%Jx|&iOI7x3oq%%Se+~`0=*y)J)N(Fm@b_hjZ6ors*l&Xiz7%IiGy@|e1 zfCX^rpN+w-Tr1{Q6B?TY_s|&&xPokGTDp-}TXnRUB8a%pXC>19&PKRL2z&kM3Rx|K zJye?s>?%;MY`(|Pn5oA&tAi<}T4oUi!%r!Mjj0@3%$Y4kvTX_Y@=}xWw8?6Xr^|)7 zJ3vJ$6@Rsf5QMepPIwY#tRc``G3M>ZlV z?2o(YHVLj5tr@;eg>o&--Xyq?w*VsnkKih1z}&(FQ;<0k4Ci5IG!54ROe)UMBA4op zWiX(!Svy)iK0Y6VfP_fC?B+qkchiSQMYzZt>>LF>liY z_Eghoz2cxem7HRf%N8c$i1_794i1}pvz}CoH0rG)R)eka021WP(K07^FegBf@f0vT)gE#T5+J^H{Hy z>7)q8C1v!LY1xM-Jv44I2L;uaQ+=syI6`Jk8Qki{Q#QmXro=W~gevxAGDhlgrltpXut1QQZM8y3-(3Hp;E(IM26B|O<`!p^pn z3Gyb&ZVIb(5^9SvU!cHHY>HG+y5VO^U4@|AD0o5Q^BI%OrolF3TWO%Eu@HutkhuYF zFo8|1P$nofvq^zT`n9&TZ6-vpwHz)YZ5V9#N4ZM79tXF#k>0LI)Y9auxfD$`?2?j_ 
ztbUWdk*+mTu+dedR0myi1Xxhj5K2}{BG~W6O$^WHlW<+|dDv!9IOtw|a!qcyO7V==>NmnbPf zGBUWu>gQuo4`Ii$q}h)&Q9B0wPsRgRELA*KZF_^Ht=$a8+1{qE*_Q=(A;ATBxo!o+ zx0!Qlgo&1wBw9ugZ&iw5DO)nv4BAXkJ!Wb(s@4ivWRh@`fHgd=u!7&+h^J%eIJh3_ z*0APR7*SCQu5`sDv`{Q#&031~Y?DEG(v^~GRwR*TQEOARp`?}ox4VK`g`-18C5l)g zd7e;Qa=cKH#1`y~cNMyYnla^NJvk5SmMmG^iO>b9;jv>#QN*GaR#e^13b^H$@}&xr z3#$;#v`ogFxGFV5rh0)c2@qA|K+lK>{;onhhFeR)BocQG$s{#d8XxN9UHSjp1iHL{Ng4El`wg<$DH*5{HXo*7D zD&;o?*hW3Wz@C<@1^7mpYe&LV+{wEVUa3`ynmGwl;Sg^m#5N9B$yP=|8o)=|a~`}~ zLnG<6a)L3fC5v*^*0ylrN>ORY@KBb^1%*VlN^$XW&EjU`*0>sVdgwgorL_j*N@t6* z!{&#qk!U%VM|h>y^M{3U8;&8B3WJK0&*LxIV=<>x3KdeGaFi`38J}XbHl2m2X0A1@ zQER4Q;h>bsT!`7S;apq5Ek3r5fL-ml#*5t=nr)PeWeAVviANlM+>)W%pjQ(-*rfI} zG7(=}v6M}*Oxp@^ImYI-VHU7QpTXpM0`^08!Kx^7zL;WMj3d_Thv4!zuWCOFd1Yb=^S?ii0i;7rw0%fA0)wo2+qTp=Eooxg|j&j>y1)EFV z8lt9*TyNKsz|ax*TJpTp29{(O$P(soFq{Vp%h?*rr4IPwg^pn$%+??7?Y?Bv(Y#gay5}k zL~}q!G!a#rrL0;tBO$tMHVR=|H63I^y$hcN1)Ls}&E!NG%YsFlw?+uHbi!OsK*e?w z6Y61#Dkuo7hMj!dT}etbS&d>ND@w&%2&EcRX2n;h za<-Z(fO{6;;baKaETZbVw@a)R;zp(=c$=^#={AK>lM4;tQD3#0^l#hm z-f4sTUmfUno9)}=-P774@DI77$pe|eL+w6WMZ;sps-WNu77L0|R0PkV3gL>VmJ2=! z+(m<>EE)>5uo?!3vEG$bLyX5W0$wzN@lc1jj44^i(K;;~wTx(0vw|2EtwEuX&SJGv zPIURPDq-@QO?ER};Bc3itgBvoJz`?KWm~;lLmG1`4RBYojH{xJZ`iy!V?ag&7Cvc1 zObt7@n1VwgRPceTI!^_IHL(#17lC()c#Xk0&cSsWqfO;-u32vNRxIRLQ8U>g)RZs^ z=57s*4Ln*lMeU}X=r$=~Cs?+bbE+v5Yl257atIm*Gi5PjHWB`WsVbV%vrXU`%N?JH z-{}ENkN^T1oPwIK3mLasDhzd_PZs2N2yN(w6hIilLnw$w;CRG5j3OcjBn}yd1kM7n zsM&^_k=a{M&g>$t0qnt9L>tTwi)I`jHfKv0V| z1?sqDrFP}wShn1pT&%>pnPfk%%Q&aQ%I_%ofVfxw!7-~#k? zLq0f2U^$x&#CRYU^DxAt;xGpuTph+CGtOD8IA&(yZRa;~0#FG8T&%+&r~^WyFth+d z7C`20@hH7wz_Lnfu&O|0!Gp=$Vk^j>LNay^VB1yzp98@Z0*Li8bsN7I%J6?`(SH_% z(yQuri*^jz*)hWA{}mQtxY@?qY_?$xvss2&2+a(0Ru&%SAr`Z7C|FG)=4}XfX_0LK z2K@(Z6wZ~jQZ-j94>{O^pt3_W+Ze*|BGh&&ti@KO*8EnS@mz4 z>0cT3Q~2cn#=Ncz>J2f|KaaldG3P%#sNPKi!GRs;wLL0v(YEM8{aYuX6VM6h1atyA zf&U*7`02d{V0piAb_2Y%V9RF%V}DwmnCC+Vo1LTvKI^`pVPfYU5}h|iZ2RsU8|=Ip z!qC5ZW?$McFk`M~CU}EHzhQ7@{|E#0e;Rf?vwttcU?-%nBaG;V3wM(`5yD-e6BOJL zMsJG)?q=+U{&u8MTcdrusoenIjx=t^nBGpp*q_39J3mKni`?z}80-Y=c7&0=_}q>- zYHLVtM;_6Q!tF}kz}t>AW_zq{=ht8-ytX5Z+8R;Y&9Xg+w)1O57fe^`hRSxNk-g~H zuGkBR?TDkc#=v&-cEet~8G4bfT@eHXg1E-6*bA@hioFoljyR$lwc3%!{uHR%J~ytDtMvGxK4U!TyW9pK2KR>>0!0l-&#iM_vJr^lfjqz?it9Gx8F%KJ4{-AcN!G zzyI*3|6K61A9$&uPO^iJ4)Ha$`|cz#COSzY3}X!w3{wqz8TK{IF&tzVHXsJ8;ZVcj z2A9ES2pOV=l;J1?Z^#)62G!6o9A`M$aH`=f!+C}a4a*I`HC$s@VYtO`yWuXweTIh( zj~kvgykL0Qu-@>t;eEp=h7E>q44eD<`bPDQ@0-%MSKq9@xqZeyq|es3sL$0G=%f2m zeaG}=`-*+FzT^6S*>_go`F+d#uIyXUcU#{*eGm6N)wj0q^}cudKJNQl-*^23{p0$l z_V3ewK)2hYXk@f|bb54g^eLm4jlN;@1EXIY z{o&}%V{xJ88FzcU!sJle@jM+uz4c9yf2CW89JBYU9oscip&$#=So7 ztMTKOn836rxV9aJYb?@B0KSfiI+~iXX2|9zn(N@5{DjnI{VEz zW9QiBh;uHOvu4hQ{b%h@?SI_z#9+z(?R18T6|FDpsNmg>EMwE+YXiwzWm@9=Jn6R=H=#HI`4U7ztLjM8!tDm zJ!Hfo_#wqZt~%t^;c>&naDDj3;dkcmF+VW>l=*kf|J<~{DQ)_-=?T-15C$pGHPCu^ zGVFs-f$xR?hRj2FSU$Gy zZ#~9(ne|oMR9o1#)b=Dk0=MHQ;`iZyUtn3FEx3Kb7l#fXDj#~op&uP~;9l#lui^{3$6U@>@C$Pgk!zX`n=J}_Jf{~myu8TzJI$M`A~wdgLp=IPe$EUp#fx-bX1%-GB6iquHbHIA+8#iDPa! 
z=6g28Ud4XRk=$k62HwG6#D9`GJaa+jV_~6izVM;AP&{A!D7!HGo9xHA#kpmQ}F= z_EmpTy<3|K7E{mE4yygC_D+3K{nGlkjdq^`AlnG>x|ZiZFl?k#||FLAG_wb z{f;~NxOa{xj=%DRz7x0;)|@!!#3d)bf0E~<>rWngvV8Khr$DEif6B&RCVqMEl6{t( zwB)^Cd46@{sk@)5p8D!(ho1J^(+5w_pZ?q#<})rix5Ym0vIZ_4VgZI=_AXd%vN6v-*Oe3(mY?5izdr|G8w=WJ{eD@{$Uvl0h-!IQCfB91TrMF%-^RhE8+jM#6@)xf-{EAzCyU%aW z`t5hW%m41RD?L}PzUqLhmR&va>iX3mTob?MiQn6Pzv9}xuRZJ9AFeB2_xAPl^^dNw zu2^xy%p1b}*j)f?{0-Szg}sk_(S<< z17jap^1zk{+Yf&IQ2C)xAI?7f-kPJ=tbZi=$V-n#9)0dH|6@-+?tc7{Cx|B=d~)HF z_dT`XseArt`{P|tTb^G14ED^O&tlKs`JCms)z4d>zxxIJg?rZ?w)TM+7r*%MI_J8_ zUm{<6`sLuuYhQ`I^4hCMzWVlSnb$sgU3vY>H|lR}UVqY`M*Qi_Hz&Ti?9Vg*eAQbA zzjf=|mbdSJ$NA1P?=tVMe~*9flfP8|^8NcuJ{bSOvJYo{xZ)$^qx(K~fBeE1)fb0<@yv$ghWEczzufY-rC;s!)pZ-ujcdLRfBolg3g2wr zbmq5veS7`iZGV5_yTo@NZf<-(>idg-IQWNqfAs(Or!9poTef^|*uDG4KSSmruomds zvfi-7FmiBkaKzxq5hF&9843RFHhSd9(YuWsJ9f9RW5-P#*ZoW!xBG;N6LudrWy;j4 zQ>M(EF=NKeS=&E-V2&FT#HEQ&Jw^#4bYA_L*yacnbmDy8H{xxFf)Hze>Q>I;c{#EAvZ*wsG)q6br z%iqjhi$?cc=e!_3ea$18mt2!}DyeVMmNyLJ2Kqov2PPR78}1zH)j>os zE}Xl?5Wg9J@HOkKX}gz@!FBFw8!n4qwlH$OzqQ41)Eh63TyWT|OM;W%W&Hbpd-;)z zu37u~A$MIckDRdXu=7>~XI*>Dqh~C6uW5nh-R?W!ySFCZdG1wr-kF;B)LpNy_C7wt zz37FX&H7$J%}dTY^!;&n9eDpa@9lB-acfU~Js-dEka^EeP*z>Jy7i#5Z(r`Rb>(aS zn0vaE-ZXj1;`S?VAZr#kHcbi7ICst#!v~x8JE8de#-)kB)qi~Jp3)b}k4HcE`WC}P z{?#>qzxEyK(-)S#d*|mfM*sNr%d5mCi?$d(`{0)8=l$*T#vk`OKXh#WjQM)2)zN@?W7pYe^ejy~d@Z&q%6<(?bo9&Ficd49Sh zH|IVcwBw=4~V(3g_GlBo9;`;Xea`D;sg!YS^J@%^;ujhZ` zUwzILd(Pf0Y^2gBTufi|#S(s2boTTc_xj?TnTJ1Te(Cp(6R~slT=be_-0e$Wy8iLx zcQ?2vZTjmE4}4!gcE7&lxepl($G^4+U;6f4x4*^i`SOO%_biGeb4OY{2;lPA3KlJLFR@!55wl`V#GZtkgfnzzh2 z^R@7!(E8<#x)s^HasENij`_>-W@?QS-A{7V14Vb2VVc= zk?cL2nAh)EKk4q%jftC&TJ`mfFD<`W`t{P6=Y^&}i>~vmoBsGh`1DslFc^L~=+&F; zt1i4gy8J?^vgD<|lGC3??{KW0?%oGFeeRF@euZ7GZrpe?{PEQnLl-Mc*4}D)`l-3! zJayEe$8Ry*d+nPKyf*H!YoAJOGJbg2yy@rkwT2(V>-PJ{JUCu5yJsB##;W($9dg23 z&f_DeKk>8Tn{d(8$I5@2x8{NU&Ny=2s@qmy^wzDFbdKEr_D9xF-(pxf?T#}Jd*#ou z6^Y%ST*ca#Se~9Ke&D6(((1GKdT`vz&654l6Cdfj`=rX!>RylEDn9Vo7dO7UXZ1Vz zPcJRMX8pqRmaaYL$z%81{p4Gh7z~3a|JwG>qvI}LxFCMfS4Xp|vZVaE-`+e>ARw2K_Z~y$LD}0wMTXo*q$DaGhVXIYY{L1x*)Yo9*X@T3H zSiAeh->trM_SL_5^Ub4gWS>E{DuyJg;&o{{hTc=Xv{zw-1_-+Ri% z#eK$HIp&6!mMoS^N3QzI4j^G97 zeY7lj?BYQEiEEh0y45~5<+q9Bet&l4ap=$$H{bHHMopj=obUN&#mna}oZ_FA{rJ=; zH=goY`NlJC)``gXrkm?Wyc^%V|D!8EdUo`)zH8Xc^miXzxA^mCzgq~Mev$O^fiQe^ z#~$n&Q67;yXxkJ_dh%4O8V?J z<0k)dMe?&HFFrf}$u-c3d)_wfbLFKUKYdYk+~1gIR%Mpvjyz$nx%Zu)ES&Qcd;7#2 z4_P2C+xKGakylqfJth3m&j{jws;E&D9^ZHW@RO}YtLK{ju;}3Gtj{*wFhyG6`gUp{=B* zOqhRM?8XbLhg`h-+@r;#9-n6?@BjA48ON@8aQuRm>(`DEKU#L;+F5Vzl^8naXR~+0 zY5P3B^3w^U?tARZc`L8xd7rdt+QJh)J#YQ(6Tg0TmUHFTPyBAG@$^f+nEKq`ul;1^ zgUgYp=tb{ec*~o&zi`*G#iv~M;5X0CIKy*UeJD zdA0bIy#Co)PXtSwg>!E>boOqaoXjWd_%C07_uE1;e%k}TA3nBy#e?-`;q~e5Yfrl> z*S`9Nxto4A^9^H5&t#`bAKdixqJdLiU4PNa_OH)BT3vkVyMI0U5&EXO=1#SR-)wwh z)2WXhhIT8}xB0cVj!GW&!u_}3jNSj{s=vK_Gmy>@v=XG((@heX^B}U!-YsaF@ zceV@u{OQHNUv=VLkLCPp$H-5Q_Pn)h*4ry4JZAd%wCnwM|9l4ikCoKtyXTzer9jw5vnDzA^qaQUudns(lq;m{?)$v<=9|&) zp9!vkS3Y{c()zQQ4?67+^x}fA>PJs_^D61*TMLZPK6vY}=#|-1|FYh9>72&squ(u` zcFP67ai=GHK3(1tmoI+$px17ib9k@*ec5}w&rdVH{%*?h`L(aTVYhypne*4*ANIns z`_9`u_lmrD+E)SOrsP9gjs0)`8a(z1UU!6J&opXC$iZ40- zujg&fuUnSC@|#abU)_IFuPS}>)c3BKC)_gojn`jU@%cL!ZxmMEyKK?vvE_?5e)YqK z6|)+jE?GPF-m4Dp<^KQh-@f<3g5t>CjPHK?O#Q@fK3T8{S+nS#70l8XpZ<;?f6{Y* zy;F+cxG;T(yM5;YOTT*F8sO2j>!&~Q#p%~=*al_g%qzt)*Nex=eOHt|KKc!RBRC1n z#j7@)51qCiz3@G-&OYdmch1?C`@j8rs`YIA)Z>mlGyxQ^&@Kf59e?Ganna* zAFv<3HS2%-*R&%p{nNwPanGOfHbVVi+>N~Dul9YX_`jO91M?DAg^n3RlJ^FF>jxC0}Uw`4ARr^d>cMHhJ0IM?EV)K6CdO{FwRsRo&O@ch|L3j)*Q@xy5io%6`j7=l!b^{vS^7 z`~Kw>bLTNHuh{2&@(}TgMVFtq#PY-oP45| 
zw((KsZ`*2c!s@%G6>iBN`QkZ$T$Nq+d)vm1)c*BH*51D8;sNi9==nDvJa64OuOGN< z3OURDntKNO@#G&*+IH%H_cuMC|3bX?z1{Kgw=G?^VfEF)Nnp8GSn+u7#0SweSDCjJ z-`BjSasSzGjk^8&b<`Ij-w|j2FZR9ztf^!RIEV!lMNl*WAtFthv{00$pi}{+g9y@l z6QxSoH54_7f&>T}M4AXlZ&IZMAw;B?0161wd$0ecgk9gi`~Uk~_dR_d_sf^L_vGH0 zGiPpQ&Ya5U>PoLTn3z@?H&0I#%zz-=_&-|`9dqE11}9sa=gSDN$`Ay=L?_4hX-s~! z)^K`B<+|JJKh%5j@FNS>9}QLF51Y(CC$zcuk$wbc#<2O!Nv7%l6e6G(%N~u(m3ZZj zf%|^*1Wfic^@hlBziu0IerhadNPzzDmY1I5%=fb6wQVU6-dEn9w`4?!8ke*2Y8``n zbbmSdAL>1EU)kz4r~mfey)lUz3OL~oSmUB1H1^RT({A> zb$9>B>$!Uv)mUx<^nbS8tySIh$eN;jdfD&FdDzT5Zh-vj0Ql89Y$mOefaHUhtU9N zu_Mv=?S=U5Y)7I2&|*iT@!JcreYPXf07$XJXl%a^zn$zb8UQVJ7>(avi0!i-Mgt(l z4x_RCKK$-vhtU9tvBPNm?u{Uv?l2kvC3YAM!t3z6a);3Xi1GIr4Kew4L(b60TFDtx^{*j=&(>o`NDcp|RoKW%_$zC*XQ zO=TJXYIfKtST*KT_N|C@}NYbtG2=%P+qxHEfh)fp1BG7p0h2UWu z;~pqw`cc5|tEZW_Hau#{QmZH{D!mnQdg)R4GF~cYAQt5k1*-*|I{=e%x=;+yG=^v2 zBzXdPIgf}y{u%C}H%(55r`5CLMOnMj=c5s)NZDB(SjyV>x-h;e2%$+sa?icgEBzRs z-aOhI(vgt85Csz}F8ArG;MQU}Tce1Ho_ITvJ1cSRKz!tA14`@_)f=XW(SSUd`xVGN zksIG_OutnHN(3%BjE9~-b?B4>XW(SnOd@xvXR^WcEEe{J8J?B##aF5-cvfLdims9# zYGg>1le~}|s(ZQ*5*G-LwhNbysh1{?{h#3^^`5=DJzZGS2a6M4&%)| z!5zwol$DQz)@V&n#t&plC&c>_IJ;+9HT&{xr{!B`NoZM8?b&+A+fa>*jE;C#Xd#y>&AA$Ls7csaaMQlBbyxSq>{?ZHA}RBY1|rN2>vy4A}I8-oN)BFFTH^0^7a2V3I6PX zqj~{tMPb4g*9v&9Lbq>v)1Ef4mkNJ<>ofL-$a zoa2Y>^hSqguNgRw!+B2dnrGF>nJqFSMd@bF7CBl+7`a}|`!l@1?!^y%_3axmF%e5} zX^IZ>*s-z%32*;k)&w23fLB`j*)QmFDr!kwyZ5t{pJ6n}o#Rt0Khwj{PJI=Hh8SjB zWt_I|UlLuIwMN_Oc)UBW@^F(rd;EJt5d7vkweNC9oN=}qQM+>E>B3m1*FcuwiaC=t zP~NQc_+nQ|Rr=lUMNp^!%Ot)K6Q_IA{nwV%N-D>Icuu4-jDgQmQH%=9H@N z8qDy%KB&uG%sq%6>5yfpz3ss0YM4cby47P&c21;&ty~|lOj{3Ca?0@=A()XaEIzF! zNbczp?uRX;9$x=~%Qs(svw(_d?-Ubef3EEzAJyWaRb4^9`nhBqqk!(LtnetK)EU); z!GKY0{Qx4BhU(;|s|0ad6(k!{g{2wAqr0tU4y+^4!z4DlvfXbY4L3tF1I}gG$QcB#h zrWzrRWH87&xcMZ};}17mZ`pW_C|D+|S|G4TbTQ1*ib04{z;7HFDzTbizWpx8 zksn;T^wDYQj+BhYNF(i|_*(LdEdA)ivkF9KivJLce;>FHcW*gsEH%EtkMGVE!sEm; z+%?EHN7&*c%Ej{p;8*2t)9T}gEsU1EwU6EQ}X%lxM*kn=J{?KDL*Fg9XHAj8tWqrk`>C)qs zE*#=9Fo88tx8$NbuyfP9QI$jd?cM!Bp8R%Q!y1SgrNxAp!cby41aL4{dJ@Tca2aF( z?ZBjid&+aiF}gN(1$uAihAQX((XH--|G1R(yC*oXdhuj?<>HiOKK9H z-85NIyFY^&9#jIgs#Aq~R`DjCF8)4rBAPPr&W|I36bJL9PUVWzVsSMHqbpQhbsRGJy$IBneO2vbu&+-Ia9!-zn>Xl))6k;*LN@N z%aIiMcQvE<>+0VAot=JC!OhdNxLJ7-(Nvtfnr%++ldAe-cfEBi%hibr}U%Q7^)g)Ya>6bnu6GbQedTr3{a5 zTfOrmE9J+}w{Y5Z;nShy-om!mV^`RwN&gU0qH23d%5bu-_r?W*Vk2_Sx2stdhS9E9 z17%k4#d`5gZ%^Eha)|CJ=$E(WDO0p^S~POT!QCmmUHR(aqTDM3k%trB|JhFP51aje z0KaiRWm-d#%9<*&5}MpYo09C;p}^dc+R6wB#^~$Qfv^=sigsIV60OuFDEn)%)uDJB z;$SR+X098)n|GzT6I%52kxnW7-i`ZjN0NB<$&wbN&MsXneE$Ey_TTyGo3}&zy>VzJ z(F#gVj@gRIVY@-)5+y?dqS4XaAjk{1@;2_V>KZK2LT^{BC&C=}QNbY>PQkqrGYog2cQg~+{mSw^!!&5@sTXVnq=@VmlP3=4CAS&F&1n|#V ze)E3$5QBf4QrPl|4s(gqX3w{}Pt-jAK$SN;4Da-4&*Jy;2>M28H{abO3&9_)Rgx%_ zo)AW7ug_<_7rb*wtplw12uttR3lkZpJT^oA`iG1 z0s8mKZ}fjB`&71S?1Wbxx~S4iK!7MR_jo^|pcm`FdGf>t=J&o#7;Wv_OCO;)_wqSok<3Q8QTS!GwJCy=1e z$vGV*Zmc~_RyD>V`n2I@3jAf_ol%@Jd&OLY7D^yr;3?Hl=pj*QH0I4nXe>`0$$6d+ zpIVm~4ISi}j|6DyszsRQNd%V*H4aRBDs|3vE&eh_pd0ANWCMwo)sv+BSzk4wwN;~i zS_hTD@*MOd2*X;?&CrnJIfgzsScq0Q`CRWxJgYh~|3m8_;`Gu4ewMg;4Fm|gOUTzi z_f7++uf}lw4)Z>(nR3AH*8PktKG#~9zhc4$x@d435 z>B!pMyLP8bY4*v{=+i!KmFwnSv5N&S4huDwgbx(7oke40D%L=B&iK-(k)-{0WVf3% zy}egRbTokogOP&6jgcami~$|6R0PqI7l90agU8JcYh2by&FJ_{MWIQk?tpKVM@Zw8 z@WatZRbf5^=+9FUhu56ASmnZU$a_@WWj{}Z?d)uthw%Mc-&fCsZdHr{K^KP*L|P8b zu-s-q&7O5VP-ovEgdwQ~HZAG%kqXkD;6~40x4KzQHh`3M`%9u9Fw${N}<$#g{I z14yZ&32&B3y!63}=#X6)HYv(V*pzj80?hqzV4|xL!n0JoAH@T?hXORWS#-a6ZPEIh z<>~e(Z0hdReCivLqmaBr7`Q`4aP7$8{P*~LFRl-d2+)7G+*gk1ah4+WX)2qre50;@ zY#guNo$&#P3`!4ZMcfa=&lwKxx!FxFpuyLWVEA~1Uh#GeH4u|{OVDzPS2N%)KbQ9Y&ik{v8JXNC 
zbo(%e0}7~lmB6(q@d_zh*QXz2uPEh<_F=rW;x$d(WUiounbts>=}4ZSu_Vb@Gxv1a z?1dYA$7l|fr7X);`OiWf6i458Ybr@~BR-&Idjnn{kv(#c*9x$n;;N*9w}zxDc#_4O@$c@)Pa0y$RfQiBB~Kw3EpM) zPN&HP!RzXnn+E4epiP0>3`l4T-ju}GlBmSbNl{kJ`xs!2mM4&W9>>Y#K?Q-M0To)p zd}IXZPg4>>>?8bzY{`e2GO_}%Rad$&nq*`kJom6NWF1brW;#0Jd$BOAutui!XfwA7 zhb50eVt5(d(x|Ow~-Q*ODYbII6fjY&}NX zan3=CPc3~$as-VKy}4+-@^KuIYW=b)N*59QvW8Qe-xxj(gyB};sKXshL1~t*+e=Y^ zSBp2Q`*wvyw@^p)Ld08K)00y^+Ywu6JxMYp*o&kd=ieT05GzVhle6WnmLhQpnE4fKg=c(YUA z7|w`Th(|o^`fTI5+M^5SH){fczKdXNnQLq~Wr=OBxiLB`Ju}cEn`%(HBT$ynr!^ME zGkgB%l>do41B+;8?TUm1~UOjftmG3hNzlFA@A7H0 zw$%0%dz%>cFkWVUZN4iBSwd7UJxfQXar_yEtB^M)K_hr_os+zYEyWwR|A3ol;_))B z;qfJ!2TN^rAJQ}8L@n=6P0KT#yNNN64d@`*b>H6hUZ{@;j^BR&V-%9g8WOKBU_Y%A zl|38lJ3nYO7rUbCcV5MBlN!4pzdgjJVb7=IKV{k@|QJeTWIt%o?r_cGU-ydfwQm?MD+w?vJRhIrgdt zTyQz;ZgVSKzkaxEP?qk&gLsB|MIPT+IiCL3p$OY~eTr1#ErSX-h&+D}M1xz~FjPUa zE-xd(=6%G?=*KH>Zqt)r4gwa0J=5MTIDAnt%7d3 z7e3~?wMp3>+0-T-Hol1zT*7LC`c5o}k=v@+KtfiA%$eYxWX(LSD`jh-JIrv_HBiV1 z4Fzb6If7=!G~%8$(&T9eUCqr#YiF>_(gw*Yuj_Y_VlPX$@gTkT)TY#06^vcZ%X##= zeIa=+ih37Aj`obI!Rd$RdJ+1wR+Iz98;yUnZ<^3UL31(FafjP6B#|$?HtGU__UV;y zUa9IdweCEBykBMu`0p0%YaoHMla^$ZSeDj?Im_;d8BHKRKuOz^kM)6~FN zDoug{pL32~Yclq1P=fJLRpH949!l#p1~7d!Rk7^_vVnxa(q(5QJ0L2W#p`%{t4YCA za76X%C5vZf_wPJkuV?*$37%S^2+2>Yae5@*z6tubzfVX*9GX|L?*!+QSR>CcXdlif zrC$RrP(^~}1=m1lG!?`bE=cyF%=-}aQhb07Ct$K+2(AE3HdGe2+B%ZV7B+c@i(kDe zM9uI3nGHp}Ifti-e3kOZ3XuCr6FZBcEH=J&!E598owf6KBP5dF=wDxa^F6!2YAeKU zNUoV1DRwVPP6o$O)Q1ot79O>+Y;RY~%|Y|CH=`|rEffY&Ewd7s zZCB!yTNq1nsGU`wqVuXkaDH=eFWi+uz1yskk=Zv(i`Ck9>frr(*w)JcwIO|Q{l+}* zA*VTQh^BivJBnw8C53aJewAoDl7Kk6Tr?gjG zesJMgg_Df&<1X_g|=;NDz2emt=j)PJNJnRRUdCp$nX;RD&HWZfj!8TQ8T&IEO{8fCM^K9sN_w25`WYirmqpeOyokA@alAVhJaFDD@3D;xOUBlk&t~W8zF>Ak< zTL$X@+InY5^_1L85UJYkc}8hSxWE!;pFhO2^|FHFr}b`qd-S^hMOC-oLQI`o0K~e< zuO)WFUuB&_yF;)(>Aho!c1!Q;T_@KeKcHI2B{yOq|JvJjCl%szd)9H>IAGD6f7TyN zF1nu!YoG2Z)GivMX`Htw)f$OT+m`ouTP$Oc!mBjXTjc6u#I62L#{g}D>}xqk{iGsaX5iCKuH0Gy-~97;e1m|bM)Od zKM}ZK*L0^~BHZ6DA%D~S_Q%`Wk_+;!2tIHYemHo;^!#%|m-arnw{iM|>8Ob$b2<_r z5g3KWC#rUz2*{oRcBRxjA%%P%uX&>*z1G1yN@W6ZfRZEtb)6+=0@-2af8U6(!2rqL z$*~mEoyTEv)H*jOqEqipGVW51BnjTzTh2;NsDP0UG`M%kya~ZgcZJzd~q_Sni_N z`uUG$jTG5#(}<|)VIHhv6S7-JaQ!&$8FCPe{Rk=2fGBU~N8H(*ZBl0Ad$%<1Oi;Ts z)+R%$ocFhPu26?R0$6Q&blAo=Z6kj?<6dGMU>2m%-Fc|zO(^8UX+ocfVosf3KmS%U zb2|f1-n@}lw_mVMdac*9etc+uFfFA!i52aQs!zB#pjSP_Emn<2 z;%%nOFWw>Cw`=GoHX5v9?Ub*`8^hy80Do1oYVe&ax<4xrp8x9$cET?|xBCMT#gu&X z933w{i6Zr~524$nTp9bd4FEX`SqUufRf%R2l0C6?7KOmv3qarU&nFdFqe6> zUjA_MCSm`!)Uaiv4y{Ylhc+<<*vqlWt|$nw>m2N1liM^5-f^8E5C|q-&?9RIft>~ zFL^z_IS+kv+QEO!d`zUhX1bH})2ML#FVA*)5xPXJyt%phR=?c6EK}@-ZE@PCBDQ(h zg9qOgT{Ir!NbmSe`K^h!_Z;eROf>O?2&!MbTDpyF-g1r^2H12i=2QjxZ1iA9#7P)P zpxhC0{*D+R&_Ct(xNAqm`4yf2(#4L5^D8p_Q$CYBBF?YK{1-2FM4Vp{>K}8s+!1kp zMd!bCu_NOAnoN6lM4Vp}5ho=D|-A_Fa8k`rzby~N*tqUJd80=?{dA= zVYv9^3k$M-nyP3@mE0JSsax_K=~!^ZDEt)p@4TY^u|N~w&E8YYtlQa&Ax-d><17$c zV*c&`;J+yW1RIx*rSSS~6n`$JReXO)6D-dET=vmuG+h`oT2qR~)t$mmTu$v}Q_#m4 zAVSQvTbH@u ztkN~mfH9n5a4vp09wv@*_OCeT;_2?D&YygSt-UcIHpaX!UT_G-<0oe41{Sl&g#ytS z;5~v_bfdUHe7kDgnRl*H9O-VUzOCS0SF1!CzAQ7Fvl>3>H*9*;kK(64;Px!SSX+51 zVoU|$Q5%}lF4`j|RYQrDKA7j9#hz|8uhv<0dxcg!q#j}+xxfI;It+Q(zxD1D{bNBx zzmHqNF7|FHj|=p4L%g9S%)Z(H-J)P_4x^WQ`e8;bc|HayZj^%Jo+%-=2lb(i=Og@a zBFK6sc> z@zmSduAC-5?*?xj-qmhQB16^@yc!yT2OM7zsk`q5=LU2?+yhH%Dr3S}>w~Q${jdw^ zRx`s%97YM*V<>Su1DCEjJ_rk+k{tFp{N^gVveI`BGrSFx!7fGEo~Va*_-2GOu&M(9+kyxTH>%5n+D|U{c;CR?RjSP1Ql}Z z2Smfrb@?p$2RW_`S)xZ&V!$3_vD_=k}G$`(Y?c zp4QGKXqWH|py0LBSn}&a)1JPjinR}Zfg8-5@c7`nov&AiI3B<00c73~8bO-7Ltj($ zBN*yQrZ3750adOaKasYM2sBX|`tru@gxLJtbWat5*PHc$P3lhk?EmwTyqc?-ECE 
zV3%8x;9=~q6-m?1n`lT)6*=eec}esLwtlYk`EhCAd`axW5K+*v5;iN;^*~)MC9sEe z!S#q#5qcnP46Yv`etGvByA5iBh~)nFLhpTd72>`ML=V{Up!aoyoM5wSEm|t5%yRPr z%1RD_iWjs%-mAlKQJy_i-Eou?IxAro1{zmUNT_sYoOjCzFbj>$l+aSdi^sYYNxwLJ zv}cR#<}T(yz)nS3UPWS|dEUMrl(uR#mpzz%4F_H{3DgY}O)E8hMRPfUgnQ$e zADl&E6XeM^NC-{U&Q;ly488$&S8|+85}Ll=E=z|Fbv3UKf1&BhAlN;y%wRyiA0C*wbw4){$5B^eZb*U!15t14 zNlH%XZLy6%k5$Rtg@7Ka%jv#rpb@J>8AcWM`#okla6-eVrcRrHQm`wV>Wrrktyr#9 zxipZIZX_Vua(Q#m>+R;m4Yi=WS1>7+QLwa%_WTS)t+()fo`CK+v6^NM_p1YY#;}l} z{na@Kk!QVCm+>@%cJYcl{V2^v=0Imjo62Yu1=7szV!`>%H*bfT`2RB#MA~Q5x{ppI zm_Hu27!K5wcR{!T90XmsE6-BoicIOq6D#%F=bPjddn!m#P|9<_N~$EIX)Y_6Dalq_ zsSSufHWvd-vqYXbZcjNKd=yLh_K-+uy&mvvsPkRvaA6o#3mS!wKdebNy7=bgMomAq z=M^INZZ}!0B)vvpU2<;^xXjSl5Mr2S&V&$O&lJVzC?OUf`@~9_XNxMJoc1*Py|G4> zK{v7+-ItZPmY&eVYVokypc3)Nf=Y4ibB2r_?7q(cg4Iv%6VB#oYQK_`?%^f|;@?ut7c6ZPvOySYSN_R7O}WW_u|J-m?wM|aP7 zR-QvZom{M9H?i*yM&)fmJK zkcylVdoUH@(N8Rrjx@}v&P|}INWh4#(mN<;q~Yoz<``kt$~jp^dJeHJr7d*so)Tf>xV#q;%hRK{OJ@%+2kO54_SnGn;{Ik%p@)LIs`+yQ9~cRZhB>WZ@|?A zCJSD*pjkF~(q8;9islB>ir2ko%IQT@#7q=)Mf0Ga zmDxsLfUo@g!fpoxkyBV8@P6A9*2c{6(MVm_o1SkuURYe*xanv8e=XbC0&&mAblO!S z529z=g0PGNlFjo`AP<50_v8+=$mXV*o-E4YICa@Y!+zf`#D!S`P5z+#-ZlypMLf4z z;@&2f!ydPSv6dW$vkGofnu{Lq{O^kpmIRQ~35rHUY7f(3HPxMqUh7!>t^7*j_cLGC z27OEr$@o0&R*#`9ohgGdhOX{Wjy9L|YpYSGajfLDun&)_uO-d83rK%Vkw+?lh|nMK z93KE}8T6w5VbAC5Nc`dPJ%V2lg;TKrLKoHbykHn09-_N_z-0s0WiLX5wRh6N6`WmH z1LBs1JqE9G$!z@acZ?tr`Xo|358p|~0y-CJfE04ktPN$>M)A;#LF#cN6}C|?_=tz1 zbiK_DMX3WDkrb_vXauv*N0S!fB?40aclrJ8&W}C_BaID!r0-?`146v_lqF&>J&h%V z=@$*y7&e6y02apugL7j8NJk>U7;fa`;QCE&;{nn`5amzB>q!2T|6}5siu6OIA>(bK zNPaiD5=_WQbCO`*!NPcBKob`Q1UR<#W1n^D>wFs2kWuM;i>-tfI_NZxU<$x1c4}e; zJ0N|&_|xqY3%(E9(`Ow(y=lgs(4ZKX9lQQwdPara*l8`r=bdQ0o*1!q%+x9qgAo* zsc55j|zKO92WbKz&7b?fqr zOzJld+)%g2lPISnkEkKU?-$4@2z<qc7u!q_HyWlY zWZa`EJH#P$>xeu|07wc1LUxIS5Eve4a6vR zZ@alVjTd`&>(`Jk4^NsPh8`%~Gt9o40G8s;&J423I4uiaEoXBxOdosL8a0Htbha3v zKhbp8y!R@to77@ScT7@m{s**oIV6g4Qh{72T{0GwnNQJ2=VOHOxsG$Fv<(0hde3Ix z$;yq2#hbtdRzh%Y5K+6Gv^sAE zA|!!ZZDLc}@$I3qr}&tS-llaPbU^e7^0u{TW5-nE1!H+uS{Xyu*Ps`NeWQ=oJP@&~ z4x1cw({@nsHLhe{^?@*U@C!)4a0`oEa&TSw*c#GUKmS6V;mSHqc2B*1XgEQ3EQxI2 zSq=2mo`+SA^G}ydlP$?r*`#Pp5|mb1Jz8#&l_zHBWannbIM^y{H_d!T#!xr(kVd0r zyT;^U>5@S14?xQbUil*;1F-L5l10q#y zUr@q80`yoOgu(uF%xfbq`~7neHzpkgYoAJReICYK+W4B^A_FIB0k6uc&@J3xD|ws1 zB@aOSkKuXPQUS1p&GYL>GAdHpC-51uSDICdG%t8W<8AC-1$lt2r)wn4MwW9|NADHb zO-m6U7iR0rC+EL`oNqc{f8YVT-!17{k)YDq_=b=<(~xE&E}iWje|_@Z-Mf|P3}+&^ zm+5-WZ|4jLfzH^nYb)9IoN@i|GxLLImzSQ|C58o$@hnS;%?P`=w;iPiOIS3<_Q%Ox zFU*Y^n3c40KOebJBw%^-b{?<)M-L*+hP>0J*uiHEt`Bz1x1_kXP6 z0J4O6_$@Ay#d~d5^Yw@VSKr$%`OFG;9>WNKK>?)qD$FQ~BUO5r;uo=-}-QGER&|X>C^HhFqePXGcMuXmjJhCH!oj3yBQm9U-ezcxdV1!+L|< ztz_j+?R4F-43GF2Lnsh4DZ-U8arav#cpkP2tbuTGXF4Lnl$Oh}@y`#`1SJQ1}^hea7kFCidPbeP-y@)`0gmp2taXBSnvNRIJ{aSXElEo3#pW z6QEChzDZ)y#BfyViTJRTWl!>el}rAbO4M$^e_tiwlAMawO6zBYu^)_lPx-L@(^J@E zDG^f~WBrx(5}sDUOz;o*VT8bRvHVsVsDB-@pKe>Xn-phy*tC?JT_@5|o%ee8`0}y@ z@1k)=pmks+@E*sLH*)9?(NJC7k}Q%BYQ+YcDR3Ba)Kyq*@D!pH@C&ZuO-sT z&al8+lHykASjlxP>j0Zspo&1=nqfS+w1GLV_+P2Das6E)vGHkcFiPtc5?`9EsCQPn zn>`vsIqkL4#5ejk>^ok*yud8`E@VDGNCNCF<7w%QrG1RQtxhq=kS0noM^E}niY`ZP zKaF;#_Do!ow#zH^1ehtnr>sl4YViQWF^%Cly1Ib^2(%}{B-~wU0`X)1@ zXIFW@BoPIl*$R#H^Cd2yNJs0(h47-e9@eCZ4>V<3R&ux>QAKL5>O}NK7(K5MesI5; zYEVZ5$Pv>)@ok_p6W900nF~{bF zE$Y`m4c;>DrEFFNttR1-&GNgu3`^x#^kekrMHjtVqKVIMHMY48nkkxw^KDtmWMKTz->M#Mxc{bAKrD@WzbD-0WHlD| z(4&k6eVMYYc~2y5;pYz_ORvV4z9f)i1J*z?JjDhlbi2KmYf*(}Wij{xq<7iumQV;v>R3M}eNmtME_wjQeD z7g+fKQEwsKO=w}W{4N4>REZa%R%$M!Sa``D+M?U1L!vi;e_dJJn_hI}rmkG}a<$vx zz~HjFr3NeWHIQ;&w5aX~tE;25>4-v_UdG`wmVHyy$s*G?vnb@=`@XoAFptSJE5+}> 
zDJ9Do&(s4-F6tpZ4URcMZ6PhiO zwg$4ahrY3}^XFRO34L~xoHlK*1NvD^QoG%FOYF-V0_5ALjwt&pW|sIkzO#bBq?3+DceQhtZ9`FOkG^i6TcB! zQ#5)t(t$C=9$&jsU^~!+UzP2F=2L?iaQYU}q3lP-2P5LFfO7n7akbKtRq9TO3GD3d{34gp?851y} zz~S*W=iWr2eop2C1%Y-OOX{omH2>gpx9ATRR(6pzt~mHt@Yt7KUIW>S7wPeKXEG18 zLXvG5(rwcxritg$x&tUVVvzz1QAd%KtJOi*o6s`1Mpo|iq4bnm4mER4NtSc)y6C&> z;vtcT{3@NKv{+hal_V#lvPZ(rsxL1vpg8V5tWj*HM&2oM7klT{*yk-TgM%8o=yT<* zx)}SmfkOw>Vzi$Go7Hg2snL^0$201~(&gzwr6_za>Qe;tZ;#l2>x6;}CiHYxhdD2j z(b4l|$2DzSdMoJ~NDRmw3Pipy-eRLbAoN*@vM*83QV{|?t%_rZmPgR7BSW&Jo?S`Y zRap#wjQ#iaS|w=?T>)2%?<=}ObJS}avr!?21c)IP8CIL5|rqR$z?eq-RtE% zr=^Xotl8U3L}{Txa^@>;;w5DLzY`I~ZHS209~zp!bPwkDs2|=Bon=6*e`I&ps=X0jEk-FqmJsEykx;5?d}b0 zc{dZ38HR^?p%@sF)e;`KV`&{}4P2zMyM;;{EMS+QLiv^fEUR=l+*&@tO!@@{if14} zkuFDiLW0gekN$ME_7eM$@RFP$QtfkYMou8rX^j{dOP~w$0K5)&$gkLtprAi0{t*1M zCFB?%zf0^~(V0n0O{-~%V`*4P)8sV}nW`Po{^kJ_XEbmy$mJs{`x4$6@8;Xq91rb@ zm(q%zU7fyI@SJelSLHvTz4Z-UF$MlTIUlGfw%a@qUey$YpU0vz_NwM=E>eHpGMM2E zw|&L#cLKF(+3-*w;{=uhiBj87F{F{T%H7-pNIj#JWfk{F12p&D!}*X#xSYg!w5XeC zZ?$8|)AU;^F&v&ZgM~aGOiM*dGR=PLy=N3VK9SD=d>1s%`{siCc!w z+s)ouH2k3Iw=wV_(6J)qC|_fR_2@B%IZ^luvWx^HLprt7=3_>s%y8G7O*3?}o3Z@`bk_U@ml zj)M$#rKwe(^(e}9(^d$@^$M-BsebWkxmF2wWkKuV177I{=v_!-Yx%Izm)$t*7&W^w zhca3hlQ4ok-Y6ie>^!a2j254whc2IuTC$t*XwEiAo;krlQfehiyhQ-hFGp^(2)Mkb zFHk+&cb-5My~4u-9_o`=o6%j@?FhiH%kLc`Zog!;M|vOhZoAojzs(*2?d`$!TFQqL?T-TdyAfmw>IBm*C0wOvOdk+ z1a6F?ds!@B`!y;5^H+Z|2RQ{NX=aZ*iE8fAd3*8?vHIav2k_j;ZqCd5*p;!G?s^WC z75cC<%t-P3&z=-eiH+gH$0dj7e99%a*O4XHYF9PlvRst90qc*5SgtHe^NH+~K zph-G!+VXshazvv>dP$RWVXb47#F7`iwxC}Z-@`oDbb4s!-0I3xA-~bd#>j< zpnuT~>a61FFZUtrOa|cLadjNvb~YE}C-A%F zy$Wi|NfN(Y-w+Q2_^Opu#3k`99Ej|5O}jZ3LRfIW>NU`1A1CHTKMWx#Bu?7+xlN!UtTFOrJjvWzlG8WIh9hL&*cc*gR5^1kzcpnm{~l-IYu}dbA}5$F%lOS3-Uv zaOQwu_CEl-je+RrBm?=7B7$Nh`_QFUgjU z5=KC@`oT=0?v$`-_jxn3M+7K{j*baEsR)7983?V?nk!$=GjDnm#kfM3?8xW#ad=-i zAfa;eUIVRxj&?HxhQ3)e-oXBHdGG^f# zmNPGh^mZ`;3Ytp2j1A$AKy_z1drEy=Ot`P=%Fw28TI6g_ZpnfPA{8fO(jsl-cwHGG zu5*e!o4lJ&qRa#-f1<)iM>TxbTqQ3OvIc6OD;U#Ap<4!m52T_MO(UghGK8-W9|d<(?;eu(IJQ(-A@nnlqHz5-`Z&hY3j(UgVu(HyNd+oGFTA{S*o zuxekHtMbI=r6nnLcSegl#;`_;q^{WQ_3a(Dz@I3#8#ZZP165AYSZKhyhTv?Kqy;T2 zDfqNgx?bDl|L>gq_4$#g%Vc>B$?F3-Aq_1ItnO_tYPjzkR#NZkz!r#D2$X_AP z*dM1gC~>{03to$0S|Qfe1g%()+HfYw>$W9MTLLP5wK$j&ieib`=Up+SqEa8ry|9}u zj0t|y)Tsa?)l7-kwvH&(vtpMJ7KPsDULrmw?9=X9>C$B3$ftCI;Y5THy>ExY!C>au zD+#rnN^iK4wt%F2dtv&~4dEuIbXf(C1WzE;b?>Dt(S*O09*L5&hyYYXe6M-dWoyEQ zbfK725udi+pIsL|(Q%4F%JU1dcl3`1|Zb4Y$@JsnI_1t6;)_Nq} z!?4&^kVZS8jDD6>pp87sR&^~0o255n`A&h8GtV9NQnCYWW;HG4j9rehC+_oL`jTHO zvIZJ*Zp7iJ(czj(3yy1`7yjZmBbFI--T~9MKDb0!e_)?!uyc9Yom7#Cft(JPk(gHU z;n%VLxP$0-vKpOp;+1J9Q%(r?MR6HYv1*!BQGN=kA4VRplj74-fO(dWS)&6M3I^QM z&zrEEQ!`hOVY1-EDhJEzi=GRV0t{c%C9Rl|+(4KPqHU@6_lUVf*0jBbfokuhPIn&p zG_n+bv0#&ie=!QkQ9ggo#Q8WX2mZ1NJhet6C;hBMy>Y&9sV(P#j>O?H;5=5{QQ z2WcK(GuoyS-aMx6^U;HQR?<+t~_1(m(F-7dcoJrK@a9mY5_t+o2 z2I7Yl3_5s(T@W%lV}0gxykgC{7OeUcLsHL1D`UVuaQ@W?gnMgYOC{KoL&~81)7iVb ztHH(`Dtq!{-1V=mUU%ieA559X!bEv->-Mr%qK0%{TYdZPDL7ckc@}E`!&m#~Y};PG z>)s_vEZGwwqGn|Ab~iSf{;PrH_NaX!rb55AD7Jf6^lKf)?WJVnm`)So@fyfDV{aLV zpmPIV_t54sPd!AbpYbK2Kn$wgUB|P;lt3L=IxVYy7^3{Bc&o$zrQ)!!ptiNo0`6eb zXDijK&$qDHb-mY8opYJCMx985TV@kT@&%z|Tq5M|D=(#GoL?i5{N8?ClQgLo^5UAq=v(9$v3;a?=*i6ug)wKN$G*(K7GGCk$dx(CUeKMtrcFhXcXR4#7|rd5a$baR zMH4T!Xn(o~eSepLW(1%qwn&h9nb-@P3Fzq1{R^2$3BwZ2`{@s#p^1BSo^pt>YLcfx zO4Gi#l8WN$PI92J0t4!bM`FsKY&XZl1j(>s9w~@S?98ssByWEyFtN+J%+J%l>Q+Co z{D>g-l{7X1XR&VaTVf)hbbo{EW!!x+0|^Q@pQmcQ(WeGnQPj`j?DRP#oP?V_Sc+&sJGLL;?%H$xL!B_>s7Zt!d?-N!F?-|!g6 zP}D85bZXV{bxV>c(nla?177hiR4aEzP6S-OJ$$R6{gnTJW5?SQ_RM_{Cnol*=}O}I 
zf{a@ouwCy;lh~5(o9!TiXw4r#?p2I9MOF?*WAP18Lh^$HQ3mw1!1e_CEQP8|f;haHc>wc{ZR6HR{a(QqvWjKlI4>UG9uT zp^6)4E=lzZttNzFnj83%0rVBWNXN?}Si?Qhu;zBxONB zi?WjQ1UYKKy~(VbL9&VTFK=4Y+OZNe7LXN0(})cBw05#E_%%|)(n>Tcnjw~%o1G<| z9v#x58rVkI|E40UPt8+dNher>j2mk+@&YNBCK2 z$?2kMJM-CsvSL72OD*6PeXWt-6pHf#vtJ5fv?;cZTq2xNH9e-xo88o)0dCj>K>lP7NL=IghO+2z0lH z|8rE*cTtbUE2oK>Fw{NIjltz>Yc1t4n5674&zh)ni!HiMZvhE)>PZOl;T0igVi;-E;Bww6# zi7tv7C3Y&;W^A^Hg(_gEdtP59P+JWfT-3R7JT)*wYc}*qMvO6+`sE29w{U}P5Qn8C z(6=Sh7jA=)l(ZBSO_;O~DOS|4BMcD@HvR+IFf$_rQ*kcBQC`>E0PGRfzl4IQIbJ$d z!!>e7fbW*0M&GF$^OiQrGHEm=wt$A6scf@gwiVLUHI87%SQ@nN6_nUw1m9)aeVu%l zd4h81H3ET-IMi`R-qy9(bI7Anb$*X)i@zq^^F)v^VxBK1sa|s`k=#m!uY7L9I5$v3 zZl=K@f9sX=W>y0jwGHCw5?PyQBALY}c$ z(GAuic&W)Fs5ugq=4-I@f-3XyvCk38GirVV$x6^#mE-WW10~pm_PcKRnSfI{WrC|l zjZPF-FG^$U4kAh%Kmv--Vin6Nx0?9QqNzj?gVvG8jVzTST3(Hr(r6NJ5%qj5(;l7{ z$nIkz_QmQQfo^PB3M{mX3PmerB~SI-cH_>3I7;&7B?;Hvk;_Tu?w{L?yPb+w6+&MJ zTZb#4$`M&qNT(1+)f4XxxL(u)7T>MS2u51h=j-2){Bkyl{MC%?>ez|TzOh6hn`_k5 z9)SoH_pwsVyvNi9xS6t&<#e;H3Pawt9vhR!Q1`mye3aIoW=f3bZ2w>|t9Mw4J2K3; ze;Xv4MlumW7xgT{vx)MOafI0jq7??{sXGFep9a)5t<;sOt2=q0WDTz0lq6G_XVz6i znBFY6sOLT|w`8H}w>XB%-?$^&-?_oJgh-X<#qjTc%^Je+d3fq6`lH>cl-DSC>Ieem z>%1DvWBx9*tgRm+vZElj`|yAH$DZd-V2J3@oY6$`lc6A)<8nuH((IU?FDOa`!ua7^ zW_i*y!@4CFj~6-Gf$7rq2!Xa_kt@I^<75Qi`XJk?*G{Fll+U|FxJ4BqhP$#SpVH}~ zwYFMUop=%1C~%hMnAJ9jw=M&#Z&4)`527&QUQK1O8^!YG8r76>Zq8UY z&$n(**k#m`;*?u|yy~uA47?}Z*lZ@6rMV(0#wbrjCgIIYK-@f2=zOx`@?;W?{ze7+ zF!sgTH^#NZE+*+|^WgA4o8yn2Z{gl@!Q;MKR>2#Q;9j#wcVA+t0`rgF1w~4Hr|3&L zY=0KV$kR`9w~WKZDBp>Iy8uHxs5bILK-nRJ_f3(K&8U8R^Sd-oNy;>P;g2#S8dy<(VH5*M~TvuJSB?Wvr|5-RA{mLbM!`<<*0mqF>1C zO9aU_(-`}9T!OD*GQxK=xJLND5BJ>7kfevdfmtX~-d0NpD5C9JYF~Gpc-r_0`9>dH zN>z5XBSBVrrxXWT>l_&*%Z^QPdEQw*n#QERp`wu{)K&&7)eand~+>8lkKyzw2}m;6Aw(hE&KYVqiuRis2DMXx!LR27S?)NRtZ_XtN zt4Szsw}Sik596AEPJ}F%m2LGCq(qhY*Bt|iy{gU5%ztU7@k8;5xO|9~@!Co8&=>DU ziLkV&IT(K`IR2h}gcMUKibmnndhq(m^>=Z%qjA_8bS#OptIWqy#I1PXh8q6k{U0B1oA154 z*1V*4FTs0Qx$GOoQ~{KvyG_`=PxH=Esuj2wj^Je!f2(EEw+@dVSKtX4>Yn#_&hwmd zkQH2g(da^{B#le(;M~`zLBj!uc(n9|y1g*e{jT6naAh^2?Mc8PtG0))A7YCPlw14l zwY%NN<|EeAGU_6$F%|jiZhO^sIv~*C8`?52?lJYcBk6(kPJs_3qlD83Zu<94i(YTT z&Kmc-`necAkC}(r94dP1?92OJJM{oo$+_g~Jna3Z)jqtuEsdrg5~}PKZirTVXq`(C zbK46|08$D`1RYHTZyP4^wpfilOd%4|=LnW)8Q9K6E?WW<=nvmhBbxt?=d<1RU$Bx&#d$k9O>e}CEuZTy#A$)ra z(*gu!^ZE%3#h?0%QiR#Q?#`##?=+;Y+%Rw|`gFqK5~eDD_E9?or{^JYG=NvbaiZJL zb_aF3ngb37oX)kb*$4F~&!R=QSJ(ST<(_ouMcm8!Ui9$%*_Y^;z(vq%i&D806E(2j z#%oHAt?*vWJHTK`cgnDPS@TX&G~agTxb7Mz^=abf?xsX?m0sv9_KhMmaOA<4_Eo?7 zX^}@&61?c5p|8`V6p+h1{;ZRlfO2@x@^h+hn3(9B9yiIDl*Y`R#l+(`vFmMkRJ$vj|Jxt`uc$UE z+kERpNAJ|U&&_LR(S`W`PM?6NqEYXXF@&?Y_dBu6E-;3u)68=yb;W3bnrAU`!6R1? 
z9=W1FkpHI(k6bb0`}3Eb@W>U9T>qcG%s-5bN3LkG;F0V9gZ|24JaYZRPwU@6#3NU< zQ1QqWk6b|@s{i`^SUhsYBUcc9&-LFhj{aeic;t!^3m&=R_gq2vJ=dKsf`EB$;LWPr zkjUu)-0{Cp#3EYAZsg_kG0{@Ro8r2XXU;^at2TM5MV}9s5K5z|fA(o1_&wJ@+wuaB zT+#38|H_3&u4pmikt-g#;*smFM#C{Z?$eE-r2=+uPdIsdp+69k?C`X+Rc;oFgBLRu zaE2%#(nDSs%8={kMJ3WWKSO`AL%KiRh8vGu@yHeZLjSK^c;t!^Gak9(kt-g#Htlpd zc;pKDFSfnI@45c3jI2N=c;t$fG#S-OgwVMBUe0f#Uocd za>XOpKW}u!BUiL6@yHdAT=B>ik6iJ{6^~s1ys;IJT(PpmBUe0f#Uocda>XN8JaYZ> zMpry?MavS8T=B>ik6iJ{6^~r;$o0<~Tk*&hD@#0b#Ut1M50GmbicakN;1aE1M)%>n z+=>bpQl3z96I4G0XR3Uf?@SrcE zg0D~C7zNPqsNYH*WWtc_&RDnHYy3L$LxEEE7Yqfs9WC?LiVZ3L)Lrbw(}XSG$5}V{ zyfRW}+3w&0DjeShYZ`MU)%hr>Ya%%jHea!~EMs|=bC$9WugTVT871qVESBodi%Skl zo0?)19QdC8N&t-1wVr<<;e$&MoP8)kQb)Hs_*u6k{ILoOEO(`~oPzi%_OQGE;%AE>Z6%}lZkQTRjJ2>t*=J;**{a${Dvy4C5K+Ap_ojNA~mHN6}4Q{wMr*^RLo zv*im>B?UdsZV+w=v9wAGHOZo1D1jzk%LM!5V*O{HpF?fW>Zj~hen!{a7SY<-Xk|~T zKY=U`Y=AU#&OYpj|J%_!y>;p|LN$~&HU>NkZ*0dimrFv}+me_vHu=|w*n1HeDNsTL z-znP-alLD%rTVz*b}ew#w*4M(K&ZcMYBBcX{3WWvjF_Wa%jOvQW0l>L^Davev~A>f zU+*11sQc?#bb^FJBT$$C_`~#P;;OT|?I`|~WV~8qHgM7SaOpW|G~Xf0>Pgo6E#S`8p|+)foNRdH#(3AWPq(;kS+;iHmCl_Q+W;N*9q;G@B*jKQLo!RRVm8-KHVm28+ zQrY6Nv{Qu6H$V*8;WVBr4@6U&Q)O}$P8cCV%oJ(ZqtF~Bbb0F$^Fu+Z%a{W1o73@t!|ZHpGx}IZ+%zhBz3omf#v|z{ zyhD#y=RDg6ImRE{J&*FxIth%=n}Iwt;1UDKv{bvoz-PFJha4dct8Veh(Qj^E6-)?f z%icJr`w&w@vW-FO_>o4jN|BWNQM=v9k>zqap1M9wtKRqUw-`PGSmFH&){2#)FeZes zyq(I6Hqmu`T|`hdbQ;Ecg0chGzT^i6kTk+H74`WB5~Y(0q7L~ZsEK4DHsff>v?Ars ziJJ$H4mrLt_M{O}zjm#{3m3I$qrkLUcco#ipf1oCLp;n=GpSeqBA2T#>~+8{-%V@H zyAKXezFmG)t(M4(VN{_vSd0Nit|4i)U9U+XZW|6QWt$;x*Cra3ukX=(1PX=WZ5uZ- zb9%p>pwz&`1a3$C>O4Z*=Ea{*J*x}f$@oRX6phfC$p?d1Lyg3X#pfK?P4d$ujJH9) zIdQiS?S3xYejF=??K&(u>4K%xYd&oapM}>3NVh@fD7Qh)G%6q88NI+xLmsG zI7#fR*U_t?r=!tZd-{fYh)JxSl2o|4Y6O2H&nV)c6n2yW4ouA&X60x!2$sPdp<}3Apsle8ol_yA!s<-|wgUfnJ`uR=3Ds?O8qJTsw6w&R!C(;TmV7 zy_VzX8R#qnHBDm2wP#ErtZu&7_}&e^wxF$;qxn8*q;cDz0DHPj1QZ$d+}ktENGsC| z%}o?BXdP+X$Wkey<<*!ejV1vXQP0OR?cr&G>^>%9U+jzpbWK;>$YpMtiZ9jEzBjKl z^^8o5xUh`iRNU$3%O17nZgU>L{_SvSmbYqi#T_veDMPBLyl$er=y;Yz5BGXTSQNC! 
z|MJ$aAvV>RjP}i0WQ6haCL!N5KDBG!kBa1)jWm^4cb44-b#Xb$(`8P?DVy|CW)kC; zjt*ITC{7bL9#lHy2h(V##9=(>6KE9d8p5!tfvTXV&BxmWerm|B9I*SM_g4r6~==*+&?$@U1C>3n5%R{ zEaw`b>v6Y>b_*a|v5|R0(Buk)AW)-$YqCD@OXnEUJ9GJm;+B?51BIzX7K$?isUdT$ zoR)#)t_+1&=o%*LeSehF0oDe`88xQ}hs))YB?{Pu-xW__7Kt$ zzgOmgC~$quM(bo4yc5?He^I#SPLMEe9$qmXCTtNT*xAzQJM_HMo<@JgtWTb2u)7Nd z_Mq=rueg15r|&0ftLwBsR$u5_L0Dn9Jk2wXQiw@1%m}zL-;lj*h@_0L$pSS=6nb?G zZxB};$PDexsTh^Ffxn_aGzHQ!3U)(XO}E%en|Ur_q;f#6fjg+j8D*UVtK#1sU6@LB zgkpH^84u~OPh($lN)rLCi;tJuBP$==4&I#|9|MCNJh_(IoLqms@e%tbLxv%=Xxb+;U zOhy{b2Rbnwc@(1T=G$cgKYcAC4??1K`&no!txF=pOl=#)EFZ_|8A1P={Bh3CjPuYk z%wKCUJ`N_IqMj3e;VWdILWNxryTE>HvX%fzN`{f3zljJM-U{l?KFzQwLho^u<65ks=YeKmzJL2LrIUrdiGxnvi8a6M-UGj$MBi zusB{Z9YGODxz0kPE_w^YyYKtnLp6wjlcJxFyZGp}`e=)f3v~C$Lv%uz5t*G60m_#y z#uPTP-xgC97^Eg<0Ixkcof&vS*wRPAZbNMxwWQ3XFL+Qw8Y>k7{<)fp@$d&1WO;9@ zcKaBul)qsVgp$JxV605 zK&!l>Aj4b+61RmEcq_A}@YX{1-EB~I(_zSqIbES3d%8~tpR_JpG(GLQp0EuH)e1IP zVQ&q1s~Ni%*GwOF*!e7`s?ZJ@J_G}a6~dK)gz)Q~u@B?n%10vk4DPzuS3FYn&Uvj& z9;DSRm(+t`98FezC9uwr1sSAKm41(jz3)3`h|i*b!e)pYZJo-OoOek=w2uWUEVEmq z=_guhroAu9IoFgI%bqosABA%++H%w;Cj02hC0Ry?HAhPY)F+JwpSf zvnZQG76uv#XNLTn5CuZsTU#`+cww25!OIJ3oe;$fs>- z?72R46*w$fPXBrp(c#duwIpDrbhGTauFd+2-lSrRHOmR+x*>~nypCx5*qho zwM>$o`fJ{$JA>2JDG~E(li45#qZ4ODafLimhVn%iO(XhK5y|IoVW_V*JlZyDyH$F} z9zWc9j{!v5J{)?P_ifj-bCi*S0fz4&!w6J!9{gcS@ZcqEfdMk%$Xssx(iZ>sG=dOr zG7KM8YWNGiDmPlizKhpRcwtDut>9kLn7f`p!PFR8a@Z{lmGJDyDevPZ5+`*`4khIC z>#Ge-sd`_qP8-NGd%X>k$lL(rb4AkamZhp|@b@H%&*N}lHHsP~5hQD%leZA^kA5f^_1~g1T<4A3TmBpc`Y7EAJca9neK*P;LMxwkcpWy!n9`H zsOcN&)RqysE@dOvBPS!EDYOjZ$3}5w{j0(~Pq#rwp^u_?QLEda16vixLg#GGPey_( z&B`fLWt=y+LAgxbP^#w(o6kF`5Arnjh%|^nsQjObSkRa$Q*VRJJe4RT`ie6aTVqU; zx;Z_mByFjNy?{C&nl>_Wad%SqROh`c;xOWKfr<@I^F7+^9Rp_;Dv$3W7xX~k-GpD& z^6SG(qe5oGCA;!OO)f1=8ZVT)X8LxTq=MByJ@V*SBg>V<$r}`7ey?0u{U$yqiA*)}q(Cu7tk*aWP*XZn!;PST9U$>;8p<_%6)`EtDpC~wsZGDOSKj< zOz5&@s@@mUx6BK*$}5a3iHj3{@hElV3o*7@t_yd{74FCYUvUO7)dyB683+Ov(20UT zdtTB7fuWH1yoe9pIs?=tIib$(J<-GF0$T+zO1~@FaQbj#zboes+f{uc8aWeSr?OS2 z6&9_DaIAIbEHc09?ddq8slVbLy#Xcj__%QRvlb@rr~)BWG;5f7Q;mQX&4eup?08FN zOka|n6Ii`N*6?$(T=)7lzlk;XfrvbHN5?$%V9Uw#wKf(uOgXQwCXv9Iu>EHS(0>vH ze8!{*+Bf@3AV`(Dpha`EzZr8X>`i81g4h6QLWE{)7JmcD0wA+QP|7{C;Tnn$KXeRM zFL*CI3pV@1qqw6Og)B5&4NlvR`77u-4TetoT_OKWP7R~%gk1iD=|P!FkK2m#eMg0J zkC$7DbJdD0&aw?}To-mV%tu1=MN$Mj>dQPY!QSd>qnw4cKcr3w8lljB}BYT%M#g zk1&s^j=R#G*W3R9?hs?TS%UtVq6aQ)laE6UgOFB`$HKhj*yhzv0v_D>vG2zy5Q(^d zPs!E#`7iaFTJl`k2f9s)vJm>r9JOUtq;b@6rS$I6$S+xvj7HbV~PG)SmWEQk_<#a7bUOs_{qn zXMTeLDU=5r2W$>!&_lj9=Kxd2sHR?PSk*7Pi!aQ5>bbZ3KlCi#I$?jn3tK*-!D?O|#?uw!10A_9IDVXN z>Qw0~einUsVnNQ(of5#SV_s*;=Iee*FdM)5tsZtV^Jzx~Ss9|W)vr^jI3vPYwv%)? za@BYIWW5CKpf!QuHmHAiy)aq6mG+4y^j*T#Ht4ZH)5b?Qc%(z9MmIqzv?DvgFV9ft+XJcb8lTgyY#?jq|Fxgf0Aoy{dR<55BF~+YN?VT3IGQ*!K}PjYrZt`1{T`vK1~l%0{+~~T7aXdRAaohJ zaWMIzHfpr6qHH_?!SG|5Y3mKcMvJs{0lbK|`$A@%i;YTGj-=oZc_FdbH5Tu4;P>=T zPlO5(^GL7_Xc)C^%r3yKWD;z$mzl$k8LJLIc_jLIPOUOo<^7wHfWc%ZLDn>&smTx9 zMOc*?&ISwVJ3GDCc@j91C5PFz}Q1~CCN)!qJK=_HnOE! 
zYVs2;WQsw!apSdR8o2druiSEhBZI!WpiP>Emb$kANo%^2xxJNN(koM$v#Y-@>xa>c zJpR1#h6h|&mP|e2jm$AukF?1)d=_?0>H2Woqm`Z(WhXP`138g~23l+GA;PXp*Mj3OVtfGF|QCMYZM)9#l^ z-7#Ds0gbD;heW^3)wDV*Kggd-IJV%0p_&l0&U2J@C25_D*-e9-{A1ZAqiN3U-fzd2 zad9a_DaAZK7S_4uXSF;9`>S;ecKAePc;~VNyv?Pa2=s^!tEQg-*1srvZ}faY@f@3x z|DLRGqkJYiG*K&0Rpx}F=ZVA76YCzek^}Gj`K}Prm79C$a`@fUQ0va_c^9XGaNKMy zzj_a}#yPXTKHd~(WTify_o&sUeo%^%D4SJ=L|m?Y(CFK(I96C`u$jZK z4MIh(HE$jxPSW|cu3hB=Yl2(2`8FtBua;Wzh%;){@eYhmQgEr-yzF~G5o8AOxloq69Wwm}Jj6amw2xx#7@5EPTF>Qk9vIJdriOG$&%wQB3( zPLUF z&ho8!?a1Z46K$@6z)J1AtjAB!?9f05`-dxF{L9rgZPjYQnDG>J%>!z_WjL8^y-;=O z9*&Rj#~oBW^$jbhVkQ0rK1`B?&*mX-R{g#%pC){@v(65@#P%*)EpDDP_U&GEp~RTj zhp*RvN*^roPnF( zy)EddT9~i_thr74%`}prmI#Z!A63nM3XxRkG@p7ZWOg+Cyc&Y*Z7kThc4$@Aa%p{L z-FKX|dXC%vePRT4DXE(gXLLk}shSV8^n|e6bH857bP`D01qXIzQZ^LL_sC|>41@us z8RW3)=6wcU^Bqa;p6%wi- z?|CZ!6~)-09W;WDyYF|^fie=Uq{95oBscD2fh9Z&CkTqmD`cQ`A(AsX+`)2PPU7`c} zB;~-q2ztfz;%vkHuk&iiH|;0q)q<8-{n;1=rA8-4548c^wu+2`3S8;3D>IS;Ne49~ z*xaoS=wIlsf#rZRE8Jie7gk_^l2{nZi)vK?HfNxQ21mWz1z%r_V=aV5<$UYT?+v6t z(t3;%t^X~XeXn?kE-KM*VYzKlT3JtLQ~I==Wq-Jm{8CLf#Fg^U51E$sew%OC0+D?W z*8|IM7D1pdOLlUePL%eI-PHgFU$k%@lu!#)P!XN z5%k!~sq@z$LQ{FK&tmm^MW2e8Ntw*pJ0y_AzVh1PJET|6b+xX~#HvsF)ad0M;?KdJ zHx_=51^|M{@U6(IV!9d!UV)caaZia9E_tDUgCmcxTJ;;KCQ}%%yhIbw7x6Rw7_OWN zv&FN_3tkvXgT6E-1a9P~P!PodsiXbxg5fO~B0&^&O#Rz+^B1O%@4dva35uNCqzo<2 zYIJ%A@4*p&4)?ly`g1Jc-+#bd;&)~KiwH7jdd>1^zYB_JKhM0js}4x&6q2TqKjS2+ zaZu;O**`4Wf5Hg*KMVdZjfnp<;(o$P|Fc}cXXs%kqGE3nHAvfbQOOYZ?BM#q(a>QV z&b+Cw%wKTCpTom+)sq_aTm05S;pU-Bn>@Ivzu zhS&?FuB0276@6qsmw|~1+)jamH+fP=oalNPLIkiG;~xF>;of&>ep6PGfQXx{1+&Or zQ~$??y8uM~UV()aptf6GaJ>)P-=F(rob%wsWSYn$;wrY?1vMsm(D;`2WXjYcVY>7) zbkYQ;=OJ-40G=5qy8UcNBL@Vct2y9c!0BA;ntf1@@+_K<=;4rK(1_WN9Ogz268tyuHwTzq>{XA`?5aYct8)UkXfI#XVl;=V@PctDjhYL=gK*8b|#p zbSaD(E7tzdBp8n!r&cqe$lfKP3(tvd9`8moBc_?_R;rFsA=JZik+oCp zK5V|1xzKm~>*RX!rINE4D$R#S8+Frp$?Xr5>TqmCvUhlzY1~kp%Pt2>-s7l0g?rz% z{V5vop8DApsS7|6k*Vij5A3FWb4y3i0MIbo)G?CB>h~NxHMEpIqSS?)zM>|#%g3G1 z`R<~R;}^HeA$C=2Oww#%zO_bR@V9%(&M1wc0Jl>jA!)4nv!RlvWXZO-Fw_K{`tdD} zjxOuD!w=u!_zqSKL*e7AFKV1F8(pcuQTIuBfT5v1Qoqz*)}fI01eGbSRG}_fZBIN8 z*|&LJugAXUxF@)fkZ~kbeoDe9UyJc}`0o3>e@^+um2)@TtKrpd1l*7Pv-^cB5iZ=T z7BG%~H;nDEZbLKeWo{+xgz(`C&AH`j zYoYSDGtbaOf|K?_@zm*7X2q9<*)HQoXM(dchi=UETitMaeZuvC7n+geOV}oLJ@io7 zRaz>q-I?H&C}S#(K{dQBjC}=bvvo#}HYY}4MeJRR_tj=;bWGqPN*_4++Q>#AZ~E+! 
z1*`@v_QBuohr}yg1=NEd+>%kjpWat^0*61jZLveKqek0 zapPDCw?VcM!3zZjAx6=(jAyu8Ox;_Xk*1zm=Wc4?czzD?)3%@G@%R4`(9ILwSAJp} zN}r^p=bD=!#$!0zoprK}Iavj|d0vSU=7WoSgsygkRPamTlQR)F16z||b%?%>MnUt> zexu7wn+EI+2pr>2;r`c?{HeIW&PGUYE=Q_Xdv0uAhzm=C3a}qcHiO9A1{q$A>d2$B z^SvD!W>qh&qwyu#N()k>;9M)RZbY?;OXV03PY2UV{z+Rl2A&dyy7=YXL?8jKZ-_Y< zbfeJ`63;%jW0{DHu@mm!!S5i;h}?mgBZKX1QXk8oGBeYTi@Vz zWb;iLN1yK1?EKLFg2H6x)BJ8G$%4#C$lJ5cQLNwu-vM=Jhx3yOMlvr-L~Aw#h<&Tf`LX!!f%&y;>U-hH`y(WpE6R;^ve0T7ysVaA z5NLiqxu#h#s2ouB+?;tR(0yU}NpV_4rUc;+OLP6jktEL9%{9PiD8(~6$|!Q-u`{kV zN1ET?d!foc^MyR8AZgWUx!Yzs0q_uTf?F7-X}oTjK*}7@O(1j@$NqEO|KnBx2_x+# zgOc8_Jd)y4P(+Guir!R>fMg$D7$D_9El)0+1k95^yoVdyIhW5D3cH>ZeQRBqG*vwX z?a-fx(eWiQ=}WrAE-6YdhfX6I*{l9e54ew1kbZ8ngB~OOER8O|>KDH4Qg%(Gk+9;4 z11o>K@wCfcZXz&!U>hXbkuoHWjYPmc*BfL~uOpvRoT}VcF>~J2(@s#oc{NMUsJO^c zE@|FTc^OWT`udNjG@$>8ZQq~8^OyZg-)Ob1z6U2ydSUxcYH@^?b@+CRJb7kxJ7qW5 zO`y{(M;J_dk^#DjkKgR9w;xDAEt92~a1JOs7P^{X4cXF9LvdPp|K5e@VOrOiXx4P5sX53R;b*} zs+sJBTs@ywRpMsxIPVM9NP->ACAmF80?>O@W^z>IYzhF3UIAOud8|kdf5pAy zZliExU$j-WBA~pC-38yO8nOmpb^v5dzs^K!ECgRnH=!u6DT>) zemL?b((?x02Q$SR95fy*7(l@D3gI=SyNB0MHbNZi`Y8vujj zT(elP2Vk`}5@|B(`%(54#j$0tKbFk?S1tc-xle3^!k!Xeul5udHF`|H#Ku&*#NpTK zH&}qw@cfiO;%bf}@nG|Gr4OiDht;Lbdh63YU-0+DR3DsvxIFWWHf4T=ZMVx*`r9NQ zaW_}nZoyW$Ht-hTBH`E!{6TvZ^g~jb0}ZzT-A2aN*0^rCS?!ol$@!XP=71~Bo1>de z9Y*qhXYFJE!U_=OcbWougUU0+$664`j!gthBzbWJWhY&Uz_rxuq#nS8Z`q&~taHv% z#nR$n74H?5?}w|bl5#4!Sp6?7u*({@G=P`T=3k6QA66t1=}}->

m2M)*TRE&r3t ztNxy%&ck8Cv?`Ft$d0=@*O5|>mOLsmH)FaMCOV8#ss2t61KR(lJHLGa|GNL+Pvh-f z<9%tlZGe?UJ5un+wPa3~Ko=I@k0`;+Q3NAM-Gk#!p}j)FJ%e+y^RMMB7j4BxIwhhD zwn0`B%^O#@NaON5sn>|dOY}7KG;V&*F3nH2aDquq0wzX^O7YK^fMMGEme4%#u@I^c zgz{)sh$_RDw&XK(h^>NPbH&en%mGWHZz@ke*4U)DS_3am_br*6b@~%>9_vy?Un{XKK4;x(SCy$d?v|T*Va8uv%cD0}*yDV~2ESE6 z#}WJTQZl1xNSB8ZiKqKh_0ZmTRX}g2nEi#B#<6DZ+j?A{R6$Lg{)K0MC~1JWZQ7BUMR|67V)MhjfOryNwohin3uY^XNvMo2Mh+dK^ei_pI#`aYMI9Wo?#2 zH;#5^Avvr_KIcVHE?7kxciDTA&3L;<{v!GksHLJ|ktA@L!%(%GZ*m+dm zF&RnvMfsr>!_dhHUN0_I5*Moq{x${bJ~Mv__?aNXrKP|v4sEj%z~T{_s-kD^{pyV8<6`3CkApV8f93Wpg+<>o_5# zULw{gHGCx5Ysg8|u{YD_NW`69d2p5n>a;*PuYnr2+DZvrCb{T8i}{8*2MzDiGj>lkut3EHFXt-s!~-rpJswG4c3T=f3$Rq;P= z(G{o&J+5Vd#; zI=N#*DrSuC-f+&+$IsL4rW%$l^AY`T3DPY{zs;YHc9uI;uiic<+sc)2I%EivZFRY+ zbM1kOvD$EwuLjlJjd!`p{=M?8q_d>4lIP_nVwsqdsNI9gZ+%XZ@j2qke>GM~OVA8X ziZP4g%&K@rfia7s-uinDwT~xffUN}JmL->MOdai(MTWmo$yvtC7zx-H=+i$eKl>!x zgsYgZ=6iI>5wRKV!l67N+p$=sD1)V@h~q8ZP^2booWH72qAEU`uUvqx#D#I4|7KG( zLVpP`l~V6sp764$hzWkmjFQMIUnhw^?rP%W<{!AZZZ!aD1AU40oAZnuS0=eFIs&m` z=?m*d=?;oZjeX1C2IUC#8%3*Tourcpu}dv_WBsNy;#$B`4&#=>HGkzz>Ca{Nzf!Xt zGT?ejZXgQ=4Cko0#!yNcH5=EK^~tm4zECe9+`s<4uLiFS_xB|8uhaMJ9VwJwxH;`Z z&mI0r#AoB&WD?mVO9m$e%FU}0e6&;u-1;l%`KohXbQyKlLe^v_)A9+Hrwv66%xRkM zJT@Z<(4I-O(t*SXw&jzDDV{n`iIh|<*Z;-T*=qG-A7ddH6^EE z{f@N4k376EasDJ*)qh&opBeVNC?j4knX@2T6=oD^UHd&hnbXm-n{?7lv3c35gPjpu z10dy7PRMn$*S1bXh;Dr$v$rYLl#4PV*Y&|)NMsnKlr!TkL%2L${DfQ z9-n{#!GUS@cbREir0%&!fdZ-kI?y1J`URIXX+?1HQMY=)Wz*vZmM+noh$ioK$rEo` zyaPQ7nP2m#VI3Cl^Y067Bp^T(Lo|5wR{0QXA=i=d<~W|XH6d1ig=?FF--G|=&FXHC z!29ntq7Oo63go9zx@!mv_}G@e*nr9#&tJq?h=0W@Z-9(5Ug88q{A4&FaVF-FO|Js; z7?${nOnKynd#+N>FH%==Y^Q0Tdptz-X_^#$WS+ti|HW{x77Hh6Bd;8Flh_6sJw~zA zi6$U25bKgcX(NcBtz?!&;j^@)CM5NJrj=5?6q!y@&RKaHv=^OT! z;BgqPKizr%zgX$Nl6|k{J;3ZBhUIge&ZR2bQPpM6x9qU_qX>p9jqV>xb@Tl}E197^=X7fchOSS@4S6SzdtAzEo^kD?gt~XWl3`p+|*YabPd`OuSV2F$)g{X{9zzH1S3Xw#%nM4|nHC5j4-z12V$hwM~jYj!Jn#^U`4Lt!k;zYZ=t!&qU$%S+c;;_MV`)LPaCAUfHy zi5hDIEo?K`79*N|KXV0srfQNG!H=-!nKp;R`hrUf6Bnzcl8yYXG%w1=pBnP( zB@*R>hTmLjyU%Fe=0y=x?R z!TNlo(1p+Da2k_97lSPH+72&*am?$>H%w_ZtG*tF)0???}r)-%@Zj-H#r_Lt`@j2Fyy(Oh$Vj(K>;Wz^gT*gte6BGkJ5dj zfzs89PTo6pI`;IbyNr37v3Y{ytq4zYE!DejEsEK%ps&OkN#3hgV(LwxFXsI9rlri_ z7cpZuPbcyOYV~qXcG<+@nWvo`pOMhzs|zTSt^TAXPg9h7@GY#Y@NT%vZW0W=}lWnFoLtP%hf-0uoCQ-}K5%v4x5FG`>M;$2LNDm*H0^P_=G znXiGncKUm$VX!{X7bEB7Y>Flj%^TO&8JYxCY~SzHZGR(k5JA1k)|p|%ytf>JA#eWx0cc;uEug2N1Eh`UtmVk7u380WlJy*`?elQ zmZSO9AL`->Xr+?TUoLlZRh3PoOc#0erU_^iD0rT%?N`7RnG6L+`^F2MGWz*~A9HS1(f-~zv?>f+qjR_mL!7QUqy zbY9sV6DU7ncjcU>q&-}H#B}R*HwwKy`07KUct9EFO1t3(q)g^he%Up+Y74 z>Q^4Jtm1_q-TJm=rAgEI?< zvF6ngPb9rVK4kZSP6I{Hulwf1jmpay;-F}6{A$dHhW6ldzPrv0=sZbsWjS14M;uP9lQT z$(^@Bzz`61xqF@yVv;}qwrm@uAvB?19BrW52ws$^v64|NdwlXUyE=NK%H_3hyk!~+ zOg5txc%vjvkG3!<$!cO|BTQ~0PT?3F*!WxjkgWcx?o(aYi?kPq9outar&?kGr--eA z8mMY0%LfcP4jI^YNPdQVbF}n@DY?pnERqxW9v4kmG-S;`uV6! 
z-m^ZOsw5H0YOU7`kEkJJB0f)k^9FphHBT#3 zd$5hH8zXM>jd|f@@!#vB1y{qr8}3ze^0x^bu&DPy7(*WP17|f!W3d##;&g_x4Usv- zJYIj|-X$+|oTH5XUCQAbfF~uH^QJG+6yPHGGV?@@KR>+N>Dj@xKX!qEW&8*Ce{u%L z?_%!r1%}_2_fJ1x;b9sT`s>R?Ac82 zw-G`Ay`Fl0SJmGV_PT%i+j#qZQTVsz-RE1Fzb!6)7qVL0;gvAkv!)%%5HQaTyjcZ~ zV?RA`!x=j<|E^gzs7^D9kK0gPyV9G6o*%Ab6z;U zrk*DYFx@$nWZy*|TO9FU4EL%K``cvkt8jZ*h5L=r_CLn_xxo8-8~W$60{`&qHUHGp z+TL z^z~!qmlPW9`pKe?gqwftx&h1g5AOe1&-eEc_xoDG&n5mh96<1M2K)Hm9$gFlt_J&l zGvGHRCbl@dOEE3gShFFWaoqg;N*BG#4+4>Wt^7aj-3dI@+a3V$QMW<~p~z(tDw3Tf zOP3;h*S<52eTgC=OUs~TP?n*wUD=X^k)4zgX_~}Xn~+?hY-t!-#{1XJyZ4oQulwFr z@AKaA`PAS1KQrfl&i8+2ocaC#=XYrKr^tqH4jS|?K8-H5Y|9|JsvmQ@@|^XH#l3%V zyUaX3Uz7^2u>Wbg(J##Y?#02sIKZFn_~V&iU%dLi{A%*&^#x(cJ>^Z@><>TPs|ABa zz4vSjfU{;v2|kC#1x;PVQCu;k|=OFdrh?3gcJ3&N5hEctJ( zefC99`h3UoZ(;h^SAwwQpJqJ&S1{-i2uprJveXU`mRw5Z5BDv9;?HNrfUx9eAWJ@6 zZUTlcUJSyLAT0Tfr(=HJCqY;egeCv2We`8_NMAe#VaYE@{(J`rOMWy0VaY%5Dqrgv z%gx#Hd40g$lAnt#`54?Sxg^UU?fl)aB)vK`DN4zDR=M)IYlv0vwj~Ss@4TqEdLCnz ziQPn?uXC!mgEFqvNvw_eog3i(t8Nbpi`%y&`usj0HfX{i;uH^*Li)6|c;paD&G3r& zlKlVA1I~zj7w|W2JkRrDk~>;b_pHC5!u!aCeIUi-z2ac_CHa3&2GjmNr%qI+&WO_|!QA+zu9P%2L8isPEDTvx4f zXmu}KQI$rvVHnPtp9@LrU<}&!sS(UJyAY(8euZoB>7ku@V!1a|SQcQ9b49A>ez>l3 z;M0@-b@Pp8qd_xxO5NHlZR+|8?f2iXc-}mVP^`RyRURXV|7LM|n`0Y#NxgF-gv}(v zD3N>X$v%-laay0d#{#T$N5#@_i2vtHo=O|;qmI<&i>T8>De9vykd2G7;dVvo=&O-~ z)U!#YAt7z>U@yce0p~7rqvVhWax?bXnobQ0)z_UE173XU7wdm`w9ynBu_b)KyxEO5 znvRi;R-a=>ypKgPd+f@&jd_295{q4;o(Oo6A!ofGl z>P_^7lhdy{r&RK)#RfT82R~JIm1%#12oe2Ot$Ebh=h=%pH*Jj2fxNWFM;Xl3?}Sx6 zCPJ)Ow=LPsZ!{a4<8s1WfpmQ6x@$vP^YWAd{O!!J>ue;n>yP9qqR1q);wZ;rE-{#* zLg85}wajJj2vT=hH0%-9S8Wxz zg2Le`DywX6EZyk;<5(M%CExkP8g1^q8rVI~3$Ur@ryO6^u?$8{Bv$N1oz~!E=pBiy z8s=s>^Yj-wxHVfy&(V&g+gu1unLH1d29@p@WqqysFFLu$-PQ*74Cey9+`tgb|a-VsRRHp$o&r{5QizkLh z%B%&gI5>-ZZ&1S)*o5Ujnzp#&?V2b3dE4|bk|L$C{c@xS$EOJ=&CODA&U^Y%2oHf^ zg0$lTOt$gJBg`+$%YW8fXm!$;at8WOgq-OeoB}W zd0V4*aQ??3l&#RqGap@7HJ@mxVT?b?B~`aiy{ob;i_^B4gnw(0rXZ_xT$+Cnn_jgk zm|1Vko6DHp_KP#|amP0w6v93v|I1U$V4To04mU#zntMS>oj$CI)l8&^l5elNdb|Qf zHCr!1RLtF%MTkd{^g=DYiKFTynaX#;m-LK#p2cO6N)SJC|I)ry7X1q_l#ExhqNvy< z{YYA8&1)4G8bw~zm4I`0Qk@QD&7Si1pSoxC1WLW*7WUKOrCGkw-$R(OwWHh*##)!k z##X0t-Dp86x4RGFGc{71*56R!y)h$vnt!^=>!6zn-A^Cuy)iDVjG61ndrZ%FctL95 z0_?R_ml`WNW|tPHihtoe*++%9boh)|gc%{;+TxyDzJhQ{zi9_4L|(r>(}^;I^-FsL z{o~4vEZ%M%zqG{v^HqD;I<^P89JT=m?kg8nbh=-%sFA=Q7qTZS3Z=~!`YXQn3ZhhJ z@@&hv*1mnf@)}w%q_4Oj)g)7TH~#I9(6WbxJsHZ^oq~&bOI5ZfPJiH~Wni2KP`DZ? zoez2Yw6%p6WpEDp1VKN>UP_&qhhbZJeB_l*jsn_1Qj#QHs#l{)kS;C?{`d0V?Rj zixKf4f^k6P{AtBtAJ^duW8a!~wkWH73~OX>)^Ylv+6CBfLhJBZ^N0fA63J6Zd zfHR9lN$=f0&l*>WYdm#D>-pl&zq$>U5hCU2x||e5QC^|!lQmN5A+XSgHR6$(tZNMW zY(!1s6*b?V-$8CXdA{CVt5Q^_u&tO3?L(n2LT~k^BoBnn-1H*bj;CU*+kxk|q8Ijx;;tK;y$kt^GaVpBr^|zn%Nq!b(4? 
zs>&HD=pti}n$Q#Rht`-S7PF!PuYSi`%WF6;qU&GMsq&ih(DS#{w6Jz+)*YMDiQ~UB1{>i7??O^;Xd|?}+Ac5p@ zp-Y45ZW+Sd!$&!jXM6HuS*vF-W?3I1&qV1Dje=HQ$`2QYmI^Wt>%}-hXQu$; zG}Fgge*1k%ob}HBp{#7ZjZoG&TwLZBId7!-ex+yo1#?TtxWo7n?i<~&n@5}Pw1^J2 zl;nwx=;w&oCBp*>Ran=(gvnc$-~*`F9HTfz*PS}48+@p~zbrikc^FXbrtD{yq09yfY$RF=;pVR&iPqqhyVl z&rBcOlBC`)?SE9__m)$IEqh26Obkc0&1sjEb;3_jnNsyxCr>vUX6uU2>#HcIyQ$z0 zeDYn>+8@V^C82!X%`&LS)7G#1ns5?fuXHq7BYPNaZrE#{|H~)hzu^VG?VY=K^wde* zz32I+lsFWAT9bpHH>tWxAkg-Gwr5Y6cNU6*D`wU8_1}T1dB!?4i_qrn&j^oA5_?ow zlI5-mr+V9A602{Ts=(&Y{BDDm8{t#F)Z%QMrnnVX<8Ne-junW!E=gp%iVB)utsv!N zlNaZuBy_f&{6799(VPXVTqP2%{yb$AYZa|J#y+ulaJI2sWiF?0kuD8vjaq=IG8Kmu z?W%7#A8@Zg%-tE%ph1iF_CFWVPrFRXfbUMwItDW!Kw2ji`D&d`^S;>Rah@o zOjeC=k_AhY4yL24YIin2eGvR{6g=A4rrKRj4?e)nO27Cy<&*e5!C*j1&{i%$kb0xR zYfo!?;e>m2t^)sD_p1tO?Ut6``*QYWy}sP@J}>|AUY3maCQ~uThP)SGx>fsJv`9_7 z7kfFq&p${Bt(4DJaLkVN^m~<%>71Eggho>9r0-AlzE>6ME!2|bIcI7{6dNXbxT?h< zsHdaw&zMcWJjmjn%I6mEe{@H};j12!A zrFYbw#(7%pf$yiZBTN5fv+id6Sp#Q%ARm8BfGV*+sU$v8(KFASWsY_DXA~~ztW4L6 zx&HAV>x0i5e%^X5g^w8QsP#@ld#!Wct{QleVs!=XWuh`cWak1*^r64n1F^-VD(iI~ z9aG7+53zo=`b&m3-L zU+kZ~QDg2K!jMhS7blMzhP})%a2z+pm{lK;qHd|l8%OJpt*;eQ3m=I!8?@Aq9BqV> ze+Nx_dml9D`_zU_V|(^72Gpkr9c^cyAKooiXK}M2p-JX3lnL8IK&eBAIe4zx$>{OF zKf+uotN*y%Wd_q>>#kipF}fvwoclR5d&c|Xy)AuE2$CD|2IGh1sn`+aq~eTxjiyue zJ$qFG9ewS#r-H7HzjWW&vn~~Gfjkwb`sX=2vq*2kZtk}gBA)qJ>s9{!DX5bYX#Hfb zI;Y?%v{GQHmGG6p(y}P!ck+RfKcbbj7hnX|Sl8c)ivPz=hf+I76rHcea(SEa&HVYqAxzKc@-O0vGKACDM;vsy5n^lZRhzU zc)9TQ10P=nWnAU&jo?oyRNTMS6N_PU<#Ku1ekJUXyhZR)(-~#S%!f64B>2JY>s!M* z%zCF}r#2hfr#d?8YRXas1~Ho6{U{uPxtwLw{Jmd_`}_Z2?)!_*x-1cDxf;5&@4Gav zu|4Ui($>l=WEo9KRl5=vl%kXYhI=QC=jwb%_@&}s?lGVn?4Mh``7tN)HZ$rec7QhF zYmlaC(+&j-Z<--U(t$_TjP+*zfXB%i6x|!{V~n*Y8_&FPc-U5&%tC!NTh1qv4`n&u zCfIW3?{D=tE^)c(t6;Db3R=C%t5e3sD9?3S)4bFqvcq~vD1QBr!+3}8lxI;MJ<;t` zYAongF|Y3O4g_4iT)(k?Pny6EGPgy8EK#NM%KU4u;n1F9QwqMFPS@!}yXmAnP2%SC zWKB}T2ZRke?Jx2({G27*x!W2~qBoc%XeyP>2Um=o#pI48C1u2BaPOCww0K5t*aL;b z2n=>n8VH=3J%)z`we2^lu>W??Kg^c}TX#Rw*3?)LWrg4@sw^=L{P}8@HM+ahAHk`{ z11-6UcjMm}ub$3My8L`&{Ry5HYH@xF3qfWFEo2T_{bPUNCQB7lH_U#2zP-c5DfOR> zf4R`K&K0lA?hr74u_ly9Q(F9)fy$PSTvPQ4XH^oXYSUOt=U|i#3CFXoymP0(3$DGe zG-yqlV9^le2j+Lk{KiG5c)L~?BC~eW0?hF`&noNjzujc9z~u(cU{Jr1Re#D@T}QLL zy+`=DgLbU@)?%+Z8L@oxBVc)6*@Q)}OtSdRMj#4Cds8_Rb@I9}k&B(xIGssT^0^KyF3;~i}|Tw-24Efr$N z=~c%Y#Ih!w+=noS{riTniV~A5Yh4a=E>`}3XS@72G2JVN4P|EsL_}|I2{$9%Ey!iA zSGy%U9C_0j$^1a%>7f`quGJRE8K3w!d$@9@~Sh*AV$6pa)Fy%{>U6x0(KddHx!w4g3l%dap z)yTkdLjk(_>*8p|1Lu4%J}O>w*)S(K=9u99v7Km#1m+xu%tEZh0_;2J;tRZqA$1tF z_I35ZZPvGJyBqc8ZZjNT?-GT0oxcU;`v6w!9R*{ z_C@<%?*TO5e60XFhOdNd9_*8--2L5KJk4O2esa?2smc)F8YSwCPE1pXa(QkrUOK2l zgFaZCXsRv7h!;;_lndf=)qL`(+r}rniVGnSg&#&KKa-eTIm_inieRu+VlPd^%B1!+ z8q?6#gVQ5Ag*w6R0{ns+X>an>-EfWYmng*EXM2t6XiGD8F-pfKnepTNy=o+h^t1f*lzZ@dFiO5&PZ_qdj&4wISgWY51v zH_=$JIJ3Rm?cI_>p|55yHcOh566{>t=5MxUhK(6z!yiD40~NM18*Y;rJF{}C->l^2 zM$w@P($RE83JF|0Q9CeBZdg;w$E!eR_K|7ap0g||J(UDgdza+$X!9SbeM1x?17dI9 zGiIf@wZ100zKx_naS=KZ^@|M&87j};l;(28}*Y7B0sHQ z$Awz($BP0%Pgq^|;Doi_gICx9lhyfmvz12-ON-Nt11n_RshJol_aVy2y32c^7Ri_z zMVRD6i*wuYX|lwnO(Uj*JD3fadZ!NJcZO&yiy?}O>?1mivRu{76M|E^sMXJygP;#O z9Pb(EAS?E>+>tj_0lM*{@oQIK)*3xp>Yh87k3ujUyFtojx=O*@YdEaifBgQF&6eu! 
z+IeAGB(CZ#6>}?z;rX5x=FG;BL|Gh`jogoxH0$piZIsF28VGH)bHq5E&q(!vZosre~zuv8U4gWZy{mLJI z*-OiOg{pkBLx3T~Z|K1g0$Quq8c9Kl&+*P5aMS;EJ1z8`r1V9^E&E6=F=N}DmrEx> z{te^b^(z=c{9Tg&NjYE$@v9yfLO>z4Wv){4tJ=!_28IwTXS?i@!4Tp%RA2}Jh7iB$ z4j>Bg0Rs#ne(e%q2myu=zxD>`x8*Ky`!6s0AA10X5WhL0Ux+Cl!=fEwwJy;+*ZQjRA=g+ zrt3>6%o=5%KJ)45@L?QL`YFa#O-gM)AH8>-#0BZ#4R>gbOn2@qz~1>w5BdAhYV+er zGvD34vf>r~bO83kYh52<&=n6hR4_9kb3Z;hKKCL7R*ZEX<`#HE`+S(jh1$hi(&}0Jor+xN<05vYCD}5<&FUnP2{lO~a}(#U`TZE$W6)JH13t9{W&LDz z#=^O>9aARd30-l}oV}zUV%>jtPWs>8)LqrQxVh<_90jkfM<1CKRad9M4WE{W4G;@Q zv$--TO)_ExlmOvR=V=vzWg=P1Fix(ant7wHmB-!;S0axKc(xWI$krt&F3Z7 z!S&_q6-L6n<#2Q=jYiF270BoB9rTtO>eS}jaKf3*|iIp@^C_KKJHrJNOm;+z_q z&o30;&V7j+$$*$@iIOsarb8cMM)T9xL6f)e*TpYneVo|6cL}mKix*Kaq)YHkb;MuaH;9kFBwKyX~vBU?upr&n`MpC8W(Mv&?g316^cX$u<&zLPC>#xDl4tLy>A!{DfubXL_;jJ_Qm7wkz9HgcWVtH@OC#>1G?U$;0b8QiP`JNzUZYyV5-I@pYuX4h>#W)c-gY3;{ z4G1`i=L)5E3t6Opc(17KW;tH&o&BAr`k3%t>JA;lLs-A!Aj&&6DIK`m!7A~u(fE(Y zpgU3)(`%$%7QchYX~nq!OYP&}m#M|6j+48HBSG(@K9YHsIt0$V&_#q_;u7*bfeRO^OeI+i^LqDaY+*({ccQv|%v&hUx-Cif< z9C}AsrumVG#_I0KTRMk7v3<3sev<=*m5$weX4cj-@3k7Exi7iJbrtSv3QDgEw!u$?G4rncUe~Tv39RG)Xl!ZBU3-B{5_~YHyeM_-ncJW*Xy(nZO2FTGwHGlZetZWz|T3K*4!?^v3q@?hmSj zgs0yfj+$}6I#GxU@1Sm`o++_u^1#;v`96%T)b-q};R@L;!SmeZ7A}bP2?9e_s;H=m+=}XZPJBZU_)}GuEc`!=r4f+QkIgJ`Le^|$sDN5R&ClaR^ z9BSV(CGaaer5I$DWU~J}soFji*`ZbO^$=hIa@`CeeepVNY?sJ{lL$Wn=k_!Od4Bqt z-eD~35Jr3m|PbYqJr_;&~aF3q|2J!d4qQtj5l&7WkizoVxEy|KpiEahg1gTC( zTLi-fYFH>*#?}{%qIe%t@U5I{Z{9uqLY?IVYErnhAYoHYKbh+|H>ZyAVXcvD0lqQP zY~Y~Vl-l+po-;7mtY;$k<~lI}T+OYVo^HKa*(u6EjsjwYYL7w|j(j(Xk1#CQeKV$D z!&{5l{ibeM&O>*`v6Zx{SQkREx#v`kPQk4G1)`Z7d~Hp>n|vsDh&ib+za*|J%|p}1 z#|*ImtCN_7^2~A0eoZvG-d7uW^2b2NkBHfJ+G5a>CT)x?v( z#^NPhgbPoXtv|MBwu8p8@tSi3y~Atw}_&QeX>1)XOmWc&eKe^tnd45u{r&-E4Zu|JV-FAthoIz_`8G|T%^TaNl zXiF0;yvjK9q5CW!)0MEBt9f>PO+KXn)s6YJw{lE-$fWYZQjDZYNv_DwC_m(DXu{P& z4*W&SY@@kHrI>@c4B7Q=B-=QtVYJdCj)91W3$Q(U)GF1JHU*EXB5d~-n)bPmzaTjkFknMFL6c?C6RbV6*67wtrHlD0B4ww)=(4tcR##YL7j9C$)aoO&}j zij~Ry*cMMQp`SGbDp1NTZ>RBZU&i*M6p8troJX>XoeaXr#ZrM4^8cIjaF;yct79G~ z3iYU zta7gAt)3C?ymOJe%+|80zbkMv_Ve*p(HKdO=(#KH$mDIRzUO;Ed=@4{TfUf2p-QrD z;$`|%@>GMabb92_8J9!%$G>M=Q$%a0&o9A`lq+YrYijD(iBHyTQsG8U)gniitnJF} zTo)dH!olFN_x$5i^hLEks&<9#86vHMVdI9KV+5M%IfVy)7QKgt-(tH%2DnS8?#Gav zs-v`*9CdH%Ac6JoeDGTyAk;mE)lLVWc6=A4+c>RiUCi~Un$c|lbxf$|sCZbWMM8=X z>jI4G=X6kp&;Ien=o2lzor{4)-m)B`Km)TK(Mn>ZS; z#CF+S$N7@kk?Nv^sT-E0s9R9ZLXmMa9Z#P^@pv2?*7@g}Ec|OCPeJmdDoxM%O?q2n zdS?W=Zy&4P6`+iDQVZREi4>;Wp{L&++=@e|=H0H3>E^O%WXO4b^i+bZdsME!K7ltP z<-MUZaRJ8ST0*FV_hworo5wrY6k#fpB%hmL_R1jOZiVgP8X1uj!<@rkSz##|un!0T z0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00e*l5C8%|00;m9AOHk_01yBIKmZ5; z0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00e*l5C8%|00;m9AOHk_01yBIKmZ5; z0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00e*l5C8%|00;m9AOHk_01yBIKmZ5; z0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00e*l5C8%|00;m9AOHk_01yBIKmZ5; z0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00e*l5C8%|00;m9AOHk_01yBIKmZ5; z0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00e*l5C8%|00;m9AOHk_01yBIKmZ5; z0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00e*l5C8%|00;m9AOHk_01yBIKmZ5; z0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00e*l5C8%|00;m9AOHk_01yBIKmZ5; p0U!VbfB+Bx0zd!=00AHX1b_e#00KY&2mk>f00jQG1>g&v{{TMdCT;)# literal 0 HcmV?d00001 diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index d2ff51d8..79a9b6e0 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -8,6 +8,7 @@ import rehypeRaw from 'rehype-raw' import remarkGfm from 'remark-gfm' import remarkMath from 'remark-math' import axios from 'axios' +import 
audioUploadSVG from 'assets/images/wave-sound.jpg' import { Box, @@ -85,23 +86,21 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { e.preventDefault() } const isFileAllowedForUpload = (file) => { - // check if file type is allowed - if (getAllowChatFlowUploads.data?.allowedTypes?.length > 0) { - const allowedFileTypes = getAllowChatFlowUploads.data?.allowedTypes - if (!allowedFileTypes.includes(file.type)) { - alert(`File ${file.name} is not allowed.\nAllowed file types are ${allowedFileTypes.join(', ')}.`) - return false - } - } - // check if file size is allowed - if (getAllowChatFlowUploads.data?.maxUploadSize > 0) { + const constraints = getAllowChatFlowUploads.data + let acceptFile = false + if (constraints.allowUploads) { + const fileType = file.type const sizeInMB = file.size / 1024 / 1024 - if (sizeInMB > getAllowChatFlowUploads.data?.maxUploadSize) { - alert(`File ${file.name} is too large.\nMaximum allowed size is ${getAllowChatFlowUploads.data?.maxUploadSize} MB.`) - return false - } + constraints.allowed.map((allowed) => { + if (allowed.allowedTypes.includes(fileType) && sizeInMB <= allowed.maxUploadSize) { + acceptFile = true + } + }) } - return true + if (!acceptFile) { + alert(`Cannot upload file. Kindly check the allowed file types and maximum allowed size.`) + } + return acceptFile } const handleDrop = async (e) => { if (!isChatFlowAvailableForUploads) { @@ -124,9 +123,15 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { return } const { result } = evt.target + let previewUrl + if (file.type.startsWith('audio/')) { + previewUrl = audioUploadSVG + } else if (file.type.startsWith('image/')) { + previewUrl = URL.createObjectURL(file) + } resolve({ data: result, - preview: URL.createObjectURL(file), + preview: previewUrl, type: 'file', name: name, mime: file.type @@ -240,7 +245,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } const previewStyle = { - width: '64px', + width: '128px', height: '64px', objectFit: 'cover' // This makes the image cover the area, cropping it if necessary } @@ -514,11 +519,17 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { onDrop={handleDrop} className={`file-drop-field`} > - {isDragOver && ( + {isDragOver && getAllowChatFlowUploads.data?.allowUploads && ( Drop here to upload - {getAllowChatFlowUploads.data?.allowedTypes?.join(', ')} - Max Allowed Size: {getAllowChatFlowUploads.data?.maxUploadSize} MB + {getAllowChatFlowUploads.data.allowed.map((allowed) => { + return ( + <> + {allowed.allowedTypes?.join(', ')} + Max Allowed Size: {allowed.maxUploadSize} MB + + ) + })} )}
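For reference, the upload-constraints object consumed by isFileAllowedForUpload and by the drag-over overlay above would look roughly like the sketch below. The field names (allowUploads, allowed, allowedTypes, maxUploadSize) come from the hunks above; the UploadConstraints name and the use of .some() are illustrative only, not part of the patch. .some() short-circuits on the first matching constraint, which is all the .map() in the hunk uses its callback for:

// A minimal sketch, assuming the payload shape implied by the diff above.
interface UploadConstraints {
    allowUploads: boolean
    allowed: { allowedTypes: string[]; maxUploadSize: number }[] // maxUploadSize is in MB
}

const isAllowed = (file: File, constraints: UploadConstraints): boolean => {
    if (!constraints.allowUploads) return false
    const sizeInMB = file.size / 1024 / 1024
    // accept the file if any constraint covers both its MIME type and its size
    return constraints.allowed.some((a) => a.allowedTypes.includes(file.type) && sizeInMB <= a.maxUploadSize)
}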

@@ -727,7 +738,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { {previews.map((item, index) => ( - + { alt={`preview ${index}`} style={previewStyle} /> - +
+
+ {previews && previews.length > 0 && ( +
+
+ + {previews.map((item, index) => ( + <> + {item.mime.startsWith('image/') ? ( + + + + + handleDeletePreview(item)} size='small'> + + + + + + ) : ( + + + + + handleDeletePreview(item)} size='small'> + + + + + + )} + + ))} + +
+
+ )} +
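For context, each entry rendered by the previews block above carries the shape produced in handleDrop earlier in this patch; an illustrative item follows (field names match the diff, concrete values are made up):

// Hypothetical preview entry; names mirror the object resolved in handleDrop.
const examplePreview = {
    data: 'data:image/png;base64,iVBORw0KGgo...', // file contents as read by the FileReader
    preview: 'blob:http://localhost:3000/3f2a...', // object URL for images; audio falls back to the wave-sound placeholder
    type: 'file',
    name: 'photo.png',
    mime: 'image/png' // drives the image/audio branch in the JSX above
}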
{
-
- {previews && previews.length > 0 && ( - - {previews.map((item, index) => ( - - {item.mime.startsWith('image/') ? ( - - - - ) : ( - // eslint-disable-next-line jsx-a11y/media-has-caption - - )} - - ))} - - )} -
+ setSourceDialogOpen(false)} /> ) From c609c63f44f61267ba97dba8a8924f4692320681 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 13 Dec 2023 22:10:00 +0530 Subject: [PATCH 08/62] MultiModal: start integration of audio input (live recording) for MultiModal. --- .../ui/src/views/chatmessage/ChatMessage.js | 537 +++++++++++++++++- .../src/views/chatmessage/audio-recording.css | 278 +++++++++ .../src/views/chatmessage/audio-recording.js | 433 ++++++++++++++ 3 files changed, 1232 insertions(+), 16 deletions(-) create mode 100644 packages/ui/src/views/chatmessage/audio-recording.css create mode 100644 packages/ui/src/views/chatmessage/audio-recording.js diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index a4a13df0..d7bbaf9e 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -26,13 +26,23 @@ import { Typography } from '@mui/material' import { useTheme } from '@mui/material/styles' -import { IconDownload, IconSend, IconUpload } from '@tabler/icons' +import { + IconDownload, + IconSend, + IconUpload, + IconMicrophone, + IconPhotoPlus, + IconPlayerStop, + IconPlayerRecord, + IconCircleDot +} from '@tabler/icons' // project import import { CodeBlock } from 'ui-component/markdown/CodeBlock' import { MemoizedReactMarkdown } from 'ui-component/markdown/MemoizedReactMarkdown' import SourceDocDialog from 'ui-component/dialog/SourceDocDialog' import './ChatMessage.css' +import './audio-recording.css' // api import chatmessageApi from 'api/chatmessage' @@ -477,6 +487,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { getIsChatflowStreamingApi.request(chatflowid) getAllowChatFlowUploads.request(chatflowid) scrollToBottom() + initAudioRecording() socket = socketIOClient(baseURL) @@ -519,6 +530,39 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { onDrop={handleDrop} className={`file-drop-field`} > +
+
+ +
+ +

+
+ +
+
+

+ Audio is playing. + . + . +

+
+
+
+
+

To record audio, use browsers like Chrome and Firefox that support audio recording.

+ +
+
+ {/* eslint-disable-next-line jsx-a11y/media-has-caption */} + {isDragOver && getAllowChatFlowUploads.data?.allowUploads && ( Drop here to upload @@ -750,7 +794,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { disabled={loading || !chatflowid} edge='start' > - @@ -758,20 +802,35 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { ) } endAdornment={ - - - {loading ? ( -
- -
- ) : ( - // Send icon SVG in input field - - )} -
-
+ <> + {isChatFlowAvailableForUploads && ( + + + + + + )} + + + {loading ? ( +
+ +
+ ) : ( + // Send icon SVG in input field + + )} +
+
+ } /> {isChatFlowAvailableForUploads && ( @@ -791,3 +850,449 @@ ChatMessage.propTypes = { chatflowid: PropTypes.string, isDialog: PropTypes.bool } + +// audio-recording.js --------------- +//View +let microphoneButton = document.getElementsByClassName('start-recording-button')[0] +let recordingControlButtonsContainer = document.getElementsByClassName('recording-control-buttons-container')[0] +let stopRecordingButton = document.getElementsByClassName('stop-recording-button')[0] +let cancelRecordingButton = document.getElementsByClassName('cancel-recording-button')[0] +let elapsedTimeTag = document.getElementsByClassName('elapsed-time')[0] +let closeBrowserNotSupportedBoxButton = document.getElementsByClassName('close-browser-not-supported-box')[0] +let overlay = document.getElementsByClassName('overlay')[0] +let audioElement = document.getElementsByClassName('audio-element')[0] +let audioElementSource = audioElement?.getElementsByTagName('source')[0] +let textIndicatorOfAudiPlaying = document.getElementsByClassName('text-indication-of-audio-playing')[0] + +const initAudioRecording = () => { + microphoneButton = document.getElementsByClassName('start-recording-button')[0] + recordingControlButtonsContainer = document.getElementsByClassName('recording-control-buttons-container')[0] + stopRecordingButton = document.getElementsByClassName('stop-recording-button')[0] + cancelRecordingButton = document.getElementsByClassName('cancel-recording-button')[0] + elapsedTimeTag = document.getElementsByClassName('elapsed-time')[0] + closeBrowserNotSupportedBoxButton = document.getElementsByClassName('close-browser-not-supported-box')[0] + overlay = document.getElementsByClassName('overlay')[0] + audioElement = document.getElementsByClassName('audio-element')[0] + audioElementSource = audioElement?.getElementsByTagName('source')[0] + textIndicatorOfAudiPlaying = document.getElementsByClassName('text-indication-of-audio-playing')[0] + //Listeners + + //Listen to start recording button + if (microphoneButton) microphoneButton.onclick = startAudioRecording + + //Listen to stop recording button + if (stopRecordingButton) stopRecordingButton.onclick = stopAudioRecording + + //Listen to cancel recording button + if (cancelRecordingButton) cancelRecordingButton.onclick = cancelAudioRecording + + //Listen to when the ok button is clicked in the browser not supporting audio recording box + if (closeBrowserNotSupportedBoxButton) closeBrowserNotSupportedBoxButton.onclick = hideBrowserNotSupportedOverlay + + //Listen to when the audio being played ends + if (audioElement) audioElement.onended = hideTextIndicatorOfAudioPlaying +} + +/** Displays recording control buttons */ +function handleDisplayingRecordingControlButtons() { + //Hide the microphone button that starts audio recording + microphoneButton.style.display = 'none' + + //Display the recording control buttons + recordingControlButtonsContainer.classList.remove('hide') + + //Handle the displaying of the elapsed recording time + handleElapsedRecordingTime() +} + +/** Hide the displayed recording control buttons */ +function handleHidingRecordingControlButtons() { + //Display the microphone button that starts audio recording + microphoneButton.style.display = 'block' + + //Hide the recording control buttons + recordingControlButtonsContainer.classList.add('hide') + + //stop interval that handles both time elapsed and the red dot + clearInterval(elapsedTimeTimer) +} + +/** Displays browser not supported info box for the user*/ +function 
displayBrowserNotSupportedOverlay() { + overlay.classList.remove('hide') +} + +/** Displays browser not supported info box for the user*/ +function hideBrowserNotSupportedOverlay() { + overlay.classList.add('hide') +} + +/** Creates a source element for the audio element in the HTML document*/ +function createSourceForAudioElement() { + let sourceElement = document.createElement('source') + audioElement.appendChild(sourceElement) + + audioElementSource = sourceElement +} + +/** Display the text indicator of the audio being playing in the background */ +function displayTextIndicatorOfAudioPlaying() { + textIndicatorOfAudiPlaying.classList.remove('hide') +} + +/** Hide the text indicator of the audio being playing in the background */ +function hideTextIndicatorOfAudioPlaying() { + textIndicatorOfAudiPlaying.classList.add('hide') +} + +//Controller + +/** Stores the actual start time when an audio recording begins to take place to ensure elapsed time start time is accurate*/ +let audioRecordStartTime + +/** Stores the maximum recording time in hours to stop recording once maximum recording hour has been reached */ +let maximumRecordingTimeInHours = 1 + +/** Stores the reference of the setInterval function that controls the timer in audio recording*/ +let elapsedTimeTimer + +/** Starts the audio recording*/ +function startAudioRecording() { + console.log('Recording Audio...') + + //If a previous audio recording is playing, pause it + let recorderAudioIsPlaying = !audioElement.paused // the paused property tells whether the media element is paused or not + console.log('paused?', !recorderAudioIsPlaying) + if (recorderAudioIsPlaying) { + audioElement.pause() + //also hide the audio playing indicator displayed on the screen + hideTextIndicatorOfAudioPlaying() + } + + //start recording using the audio recording API + audioRecorder + .start() + .then(() => { + //on success + + //store the recording start time to display the elapsed time according to it + audioRecordStartTime = new Date() + + //display control buttons to offer the functionality of stop and cancel + handleDisplayingRecordingControlButtons() + }) + .catch((error) => { + //on error + //No Browser Support Error + if (error.message.includes('mediaDevices API or getUserMedia method is not supported in this browser.')) { + console.log('To record audio, use browsers like Chrome and Firefox.') + displayBrowserNotSupportedOverlay() + } + + //Error handling structure + switch (error.name) { + case 'AbortError': //error from navigator.mediaDevices.getUserMedia + console.log('An AbortError has occurred.') + break + case 'NotAllowedError': //error from navigator.mediaDevices.getUserMedia + console.log('A NotAllowedError has occurred. 
User might have denied permission.') + break + case 'NotFoundError': //error from navigator.mediaDevices.getUserMedia + console.log('A NotFoundError has occurred.') + break + case 'NotReadableError': //error from navigator.mediaDevices.getUserMedia + console.log('A NotReadableError has occurred.') + break + case 'SecurityError': //error from navigator.mediaDevices.getUserMedia or from the MediaRecorder.start + console.log('A SecurityError has occurred.') + break + case 'TypeError': //error from navigator.mediaDevices.getUserMedia + console.log('A TypeError has occurred.') + break + case 'InvalidStateError': //error from the MediaRecorder.start + console.log('An InvalidStateError has occurred.') + break + case 'UnknownError': //error from the MediaRecorder.start + console.log('An UnknownError has occurred.') + break + default: + console.log('An error occurred with the error name ' + error.name) + } + }) +} +/** Stop the currently started audio recording & sends it + */ +function stopAudioRecording() { + console.log('Stopping Audio Recording...') + + //stop the recording using the audio recording API + audioRecorder + .stop() + .then((audioAsblob) => { + //Play recorder audio + playAudio(audioAsblob) + + //hide recording control button & return record icon + handleHidingRecordingControlButtons() + }) + .catch((error) => { + //Error handling structure + switch (error.name) { + case 'InvalidStateError': //error from the MediaRecorder.stop + console.log('An InvalidStateError has occurred.') + break + default: + console.log('An error occurred with the error name ' + error.name) + } + }) +} + +/** Cancel the currently started audio recording */ +function cancelAudioRecording() { + console.log('Canceling audio...') + + //cancel the recording using the audio recording API + audioRecorder.cancel() + + //hide recording control button & return record icon + handleHidingRecordingControlButtons() +} + +/** Plays recorded audio using the audio element in the HTML document + * @param {Blob} recorderAudioAsBlob - recorded audio as a Blob Object + */ +function playAudio(recorderAudioAsBlob) { + //read content of files (Blobs) asynchronously + let reader = new FileReader() + + //once content has been read + reader.onload = (e) => { + //store the base64 URL that represents the URL of the recording audio + let base64URL = e.target.result + + //If this is the first audio playing, create a source element + //as pre-populating the HTML with a source of empty src causes error + if (!audioElementSource) + //if it is not defined create it (happens first time only) + createSourceForAudioElement() + + //set the audio element's source using the base64 URL + audioElementSource.src = base64URL + + //set the type of the audio element based on the recorded audio's Blob type + let BlobType = recorderAudioAsBlob.type.includes(';') + ? 
recorderAudioAsBlob.type.substr(0, recorderAudioAsBlob.type.indexOf(';')) + : recorderAudioAsBlob.type + audioElementSource.type = BlobType + + //call the load method as it is used to update the audio element after changing the source or other settings + audioElement.load() + + //play the audio after successfully setting new src and type that corresponds to the recorded audio + console.log('Playing audio...') + audioElement.play() + + //Display text indicator of having the audio play in the background + displayTextIndicatorOfAudioPlaying() + } + + //read content and convert it to a URL (base64) + reader.readAsDataURL(recorderAudioAsBlob) +} + +/** Computes the elapsed recording time since the moment the function is called in the format h:m:s*/ +function handleElapsedRecordingTime() { + //display initial time when recording begins + displayElapsedTimeDuringAudioRecording('00:00') + + //create an interval that compute & displays elapsed time, as well as, animate red dot - every second + elapsedTimeTimer = setInterval(() => { + //compute the elapsed time every second + let elapsedTime = computeElapsedTime(audioRecordStartTime) //pass the actual record start time + //display the elapsed time + displayElapsedTimeDuringAudioRecording(elapsedTime) + }, 1000) //every second +} + +/** Display elapsed time during audio recording + * @param {String} elapsedTime - elapsed time in the format mm:ss or hh:mm:ss + */ +function displayElapsedTimeDuringAudioRecording(elapsedTime) { + //1. display the passed elapsed time as the elapsed time in the elapsedTime HTML element + elapsedTimeTag.innerHTML = elapsedTime + + //2. Stop the recording when the max number of hours is reached + if (elapsedTimeReachedMaximumNumberOfHours(elapsedTime)) { + stopAudioRecording() + } +} + +/** + * @param {String} elapsedTime - elapsed time in the format mm:ss or hh:mm:ss + * @returns {Boolean} whether the elapsed time reached the maximum number of hours or not + */ +function elapsedTimeReachedMaximumNumberOfHours(elapsedTime) { + //Split the elapsed time by the symbol that separates the hours, minutes and seconds : + let elapsedTimeSplit = elapsedTime.split(':') + + //Turn the maximum recording time in hours to a string and pad it with zero if less than 10 + let maximumRecordingTimeInHoursAsString = + maximumRecordingTimeInHours < 10 ? '0' + maximumRecordingTimeInHours : maximumRecordingTimeInHours.toString() + + //if the elapsed time reach hours and also reach the maximum recording time in hours return true + if (elapsedTimeSplit.length === 3 && elapsedTimeSplit[0] === maximumRecordingTimeInHoursAsString) return true + //otherwise, return false + else return false +} + +/** Computes the elapsedTime since the moment the function is called in the format mm:ss or hh:mm:ss + * @param {String} startTime - start time to compute the elapsed time since + * @returns {String} elapsed time in mm:ss format or hh:mm:ss format, if elapsed hours are 0. + */ +function computeElapsedTime(startTime) { + //record end time + let endTime = new Date() + + //time difference in ms + let timeDiff = endTime - startTime + + //convert time difference from ms to seconds + timeDiff = timeDiff / 1000 + + //extract integer seconds that don't form a minute using % + let seconds = Math.floor(timeDiff % 60) //ignoring incomplete seconds (floor) + + //pad seconds with a zero if necessary + seconds = seconds < 10 ? 
'0' + seconds : seconds + + //convert time difference from seconds to minutes using % + timeDiff = Math.floor(timeDiff / 60) + + //extract integer minutes that don't form an hour using % + let minutes = timeDiff % 60 //no need to floor possible incomplete minutes, because they've been handled as seconds + minutes = minutes < 10 ? '0' + minutes : minutes + + //convert time difference from minutes to hours + timeDiff = Math.floor(timeDiff / 60) + + //extract integer hours that don't form a day using % + let hours = timeDiff % 24 //no need to floor possible incomplete hours, because they've been handled as seconds + + //convert time difference from hours to days + timeDiff = Math.floor(timeDiff / 24) + + // the rest of timeDiff is number of days + let days = timeDiff //add days to hours + + let totalHours = hours + days * 24 + totalHours = totalHours < 10 ? '0' + totalHours : totalHours + + if (totalHours === '00') { + return minutes + ':' + seconds + } else { + return totalHours + ':' + minutes + ':' + seconds + } +} + +//API to handle audio recording + +const audioRecorder = { + /** Stores the recorded audio as Blob objects of audio data as the recording continues*/ + audioBlobs: [] /*of type Blob[]*/, + /** Stores the reference of the MediaRecorder instance that handles the MediaStream when recording starts*/ + mediaRecorder: null /*of type MediaRecorder*/, + /** Stores the reference to the stream currently capturing the audio*/ + streamBeingCaptured: null /*of type MediaStream*/, + /** Start recording the audio + * @returns {Promise} - returns a promise that resolves if audio recording successfully started + */ + start: function () { + //Feature Detection + if (!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)) { + //Feature is not supported in browser + //return a custom error + return Promise.reject(new Error('mediaDevices API or getUserMedia method is not supported in this browser.')) + } else { + //Feature is supported in browser + + //create an audio stream + return ( + navigator.mediaDevices + .getUserMedia({ audio: true } /*of type MediaStreamConstraints*/) + //returns a promise that resolves to the audio stream + .then((stream) /*of type MediaStream*/ => { + //save the reference of the stream to be able to stop it when necessary + audioRecorder.streamBeingCaptured = stream + + //create a media recorder instance by passing that stream into the MediaRecorder constructor + audioRecorder.mediaRecorder = new MediaRecorder(stream) /*the MediaRecorder interface of the MediaStream Recording + API provides functionality to easily record media*/ + + //clear previously saved audio Blobs, if any + audioRecorder.audioBlobs = [] + + //add a dataavailable event listener in order to store the audio data Blobs when recording + audioRecorder.mediaRecorder.addEventListener('dataavailable', (event) => { + //store audio Blob object + audioRecorder.audioBlobs.push(event.data) + }) + + //start the recording by calling the start method on the media recorder + audioRecorder.mediaRecorder.start() + }) + ) + + /* errors are not handled in the API because if its handled and the promise is chained, the .then after the catch will be executed*/ + } + }, + /** Stop the started audio recording + * @returns {Promise} - returns a promise that resolves to the audio as a blob file + */ + stop: function () { + //return a promise that would return the blob or URL of the recording + return new Promise((resolve) => { + //save audio type to pass to set the Blob type + let mimeType = 
audioRecorder.mediaRecorder.mimeType + + //listen to the stop event in order to create & return a single Blob object + audioRecorder.mediaRecorder.addEventListener('stop', () => { + //create a single blob object, as we might have gathered a few Blob objects that needs to be joined as one + let audioBlob = new Blob(audioRecorder.audioBlobs, { type: mimeType }) + + //resolve promise with the single audio blob representing the recorded audio + resolve(audioBlob) + }) + audioRecorder.cancel() + }) + }, + /** Cancel audio recording*/ + cancel: function () { + //stop the recording feature + audioRecorder.mediaRecorder.stop() + + //stop all the tracks on the active stream in order to stop the stream + audioRecorder.stopStream() + + //reset API properties for next recording + audioRecorder.resetRecordingProperties() + }, + /** Stop all the tracks on the active stream in order to stop the stream and remove + * the red flashing dot showing in the tab + */ + stopStream: function () { + //stopping the capturing request by stopping all the tracks on the active stream + audioRecorder.streamBeingCaptured + .getTracks() //get all tracks from the stream + .forEach((track) /*of type MediaStreamTrack*/ => track.stop()) //stop each one + }, + /** Reset all the recording properties including the media recorder and stream being captured*/ + resetRecordingProperties: function () { + audioRecorder.mediaRecorder = null + audioRecorder.streamBeingCaptured = null + + /*No need to remove event listeners attached to mediaRecorder as + If a DOM element which is removed is reference-free (no references pointing to it), the element itself is picked + up by the garbage collector as well as any event handlers/listeners associated with it. + getEventListeners(audioRecorder.mediaRecorder) will return an empty array of events.*/ + } +} diff --git a/packages/ui/src/views/chatmessage/audio-recording.css b/packages/ui/src/views/chatmessage/audio-recording.css new file mode 100644 index 00000000..5ba0fa50 --- /dev/null +++ b/packages/ui/src/views/chatmessage/audio-recording.css @@ -0,0 +1,278 @@ +/* style.css*/ + +/* Media Queries */ + +/* Small Devices*/ + +@media (min-width: 0px) { + * { + box-sizing: border-box; + } + .audio-recording-container { + width: 100%; + /* view port height*/ + /*targeting Chrome & Safari*/ + display: -webkit-flex; + /*targeting IE10*/ + display: -ms-flex; + display: flex; + flex-direction: column; + justify-content: center; + /*horizontal centering*/ + align-items: center; + } + .start-recording-button { + font-size: 70px; + color: #435f7a; + cursor: pointer; + } + .start-recording-button:hover { + opacity: 1; + } + .recording-control-buttons-container { + /*targeting Chrome & Safari*/ + display: -webkit-flex; + /*targeting IE10*/ + display: -ms-flex; + display: flex; + justify-content: space-evenly; + /*horizontal centering*/ + align-items: center; + width: 334px; + margin-bottom: 30px; + } + .cancel-recording-button, + .stop-recording-button { + font-size: 70px; + cursor: pointer; + } + .cancel-recording-button { + color: red; + opacity: 0.7; + } + .cancel-recording-button:hover { + color: rgb(206, 4, 4); + } + .stop-recording-button { + color: #33cc33; + opacity: 0.7; + } + .stop-recording-button:hover { + color: #27a527; + } + .recording-elapsed-time { + /*targeting Chrome & Safari*/ + display: -webkit-flex; + /*targeting IE10*/ + display: -ms-flex; + display: flex; + justify-content: center; + /*horizontal centering*/ + align-items: center; + } + .red-recording-dot { + font-size: 25px; + color: 
red; + margin-right: 12px; + /*transitions with Firefox, IE and Opera Support browser support*/ + animation-name: flashing-recording-dot; + -webkit-animation-name: flashing-recording-dot; + -moz-animation-name: flashing-recording-dot; + -o-animation-name: flashing-recording-dot; + animation-duration: 2s; + -webkit-animation-duration: 2s; + -moz-animation-duration: 2s; + -o-animation-duration: 2s; + animation-iteration-count: infinite; + -webkit-animation-iteration-count: infinite; + -moz-animation-iteration-count: infinite; + -o-animation-iteration-count: infinite; + } + /* The animation code */ + @keyframes flashing-recording-dot { + 0% { + opacity: 1; + } + 50% { + opacity: 0; + } + 100% { + opacity: 1; + } + } + @-webkit-keyframes flashing-recording-dot { + 0% { + opacity: 1; + } + 50% { + opacity: 0; + } + 100% { + opacity: 1; + } + } + @-moz-keyframes flashing-recording-dot { + 0% { + opacity: 1; + } + 50% { + opacity: 0; + } + 100% { + opacity: 1; + } + } + @-o-keyframes flashing-recording-dot { + 0% { + opacity: 1; + } + 50% { + opacity: 0; + } + 100% { + opacity: 1; + } + } + .elapsed-time { + font-size: 32px; + } + .recording-control-buttons-container.hide { + display: none; + } + .overlay { + position: absolute; + top: 0; + width: 100%; + background-color: rgba(82, 76, 76, 0.35); + /*targeting Chrome & Safari*/ + display: -webkit-flex; + /*targeting IE10*/ + display: -ms-flex; + display: flex; + justify-content: center; + /*horizontal centering*/ + align-items: center; + } + .overlay.hide { + display: none; + } + .browser-not-supporting-audio-recording-box { + /*targeting Chrome & Safari*/ + display: -webkit-flex; + /*targeting IE10*/ + display: -ms-flex; + display: flex; + flex-direction: column; + justify-content: space-between; + /*horizontal centering*/ + align-items: center; + width: 317px; + height: 119px; + background-color: white; + border-radius: 10px; + padding: 15px; + font-size: 16px; + } + .close-browser-not-supported-box { + cursor: pointer; + background-color: #abc1c05c; + border-radius: 10px; + font-size: 16px; + border: none; + } + .close-browser-not-supported-box:hover { + background-color: #92a5a45c; + } + .close-browser-not-supported-box:focus { + outline: none; + border: none; + } + .audio-element.hide { + display: none; + } + .text-indication-of-audio-playing-container { + height: 20px; + } + .text-indication-of-audio-playing { + font-size: 20px; + } + .text-indication-of-audio-playing.hide { + display: none; + } + /* 3 Dots animation*/ + .text-indication-of-audio-playing span { + /*transitions with Firefox, IE and Opera Support browser support*/ + animation-name: blinking-dot; + -webkit-animation-name: blinking-dot; + -moz-animation-name: blinking-dot; + -o-animation-name: blinking-dot; + animation-duration: 2s; + -webkit-animation-duration: 2s; + -moz-animation-duration: 2s; + -o-animation-duration: 2s; + animation-iteration-count: infinite; + -webkit-animation-iteration-count: infinite; + -moz-animation-iteration-count: infinite; + -o-animation-iteration-count: infinite; + } + .text-indication-of-audio-playing span:nth-child(2) { + animation-delay: .4s; + -webkit-animation-delay: .4s; + -moz-animation-delay: .4s; + -o-animation-delay: .4s; + } + .text-indication-of-audio-playing span:nth-child(3) { + animation-delay: .8s; + -webkit-animation-delay: .8s; + -moz-animation-delay: .8s; + -o-animation-delay: .8s; + } + /* The animation code */ + @keyframes blinking-dot { + 0% { + opacity: 0; + } + 50% { + opacity: 1; + } + 100% { + opacity: 0; + } + } + /* The 
animation code */ + @-webkit-keyframes blinking-dot { + 0% { + opacity: 0; + } + 50% { + opacity: 1; + } + 100% { + opacity: 0; + } + } + /* The animation code */ + @-moz-keyframes blinking-dot { + 0% { + opacity: 0; + } + 50% { + opacity: 1; + } + 100% { + opacity: 0; + } + } + /* The animation code */ + @-o-keyframes blinking-dot { + 0% { + opacity: 0; + } + 50% { + opacity: 1; + } + 100% { + opacity: 0; + } + } +} \ No newline at end of file diff --git a/packages/ui/src/views/chatmessage/audio-recording.js b/packages/ui/src/views/chatmessage/audio-recording.js new file mode 100644 index 00000000..395443fe --- /dev/null +++ b/packages/ui/src/views/chatmessage/audio-recording.js @@ -0,0 +1,433 @@ +// audio-recording.js --------------- +//View +let microphoneButton = document.getElementsByClassName('start-recording-button')[0] +let recordingControlButtonsContainer = document.getElementsByClassName('recording-control-buttons-container')[0] +let stopRecordingButton = document.getElementsByClassName('stop-recording-button')[0] +let cancelRecordingButton = document.getElementsByClassName('cancel-recording-button')[0] +let elapsedTimeTag = document.getElementsByClassName('elapsed-time')[0] +let closeBrowserNotSupportedBoxButton = document.getElementsByClassName('close-browser-not-supported-box')[0] +let overlay = document.getElementsByClassName('overlay')[0] +let audioElement = document.getElementsByClassName('audio-element')[0] +let audioElementSource = document.getElementsByClassName('audio-element')[0].getElementsByTagName('source')[0] +let textIndicatorOfAudiPlaying = document.getElementsByClassName('text-indication-of-audio-playing')[0] + +//Listeners + +//Listen to start recording button +microphoneButton.onclick = startAudioRecording + +//Listen to stop recording button +stopRecordingButton.onclick = stopAudioRecording + +//Listen to cancel recording button +cancelRecordingButton.onclick = cancelAudioRecording + +//Listen to when the ok button is clicked in the browser not supporting audio recording box +closeBrowserNotSupportedBoxButton.onclick = hideBrowserNotSupportedOverlay + +//Listen to when the audio being played ends +audioElement.onended = hideTextIndicatorOfAudioPlaying + +/** Displays recording control buttons */ +function handleDisplayingRecordingControlButtons() { + //Hide the microphone button that starts audio recording + microphoneButton.style.display = 'none' + + //Display the recording control buttons + recordingControlButtonsContainer.classList.remove('hide') + + //Handle the displaying of the elapsed recording time + handleElapsedRecordingTime() +} + +/** Hide the displayed recording control buttons */ +function handleHidingRecordingControlButtons() { + //Display the microphone button that starts audio recording + microphoneButton.style.display = 'block' + + //Hide the recording control buttons + recordingControlButtonsContainer.classList.add('hide') + + //stop interval that handles both time elapsed and the red dot + clearInterval(elapsedTimeTimer) +} + +/** Displays browser not supported info box for the user*/ +function displayBrowserNotSupportedOverlay() { + overlay.classList.remove('hide') +} + +/** Displays browser not supported info box for the user*/ +function hideBrowserNotSupportedOverlay() { + overlay.classList.add('hide') +} + +/** Creates a source element for the audio element in the HTML document*/ +function createSourceForAudioElement() { + let sourceElement = document.createElement('source') + audioElement.appendChild(sourceElement) + + 
audioElementSource = sourceElement +} + +/** Display the text indicator of the audio being playing in the background */ +function displayTextIndicatorOfAudioPlaying() { + textIndicatorOfAudiPlaying.classList.remove('hide') +} + +/** Hide the text indicator of the audio being playing in the background */ +function hideTextIndicatorOfAudioPlaying() { + textIndicatorOfAudiPlaying.classList.add('hide') +} + +//Controller + +/** Stores the actual start time when an audio recording begins to take place to ensure elapsed time start time is accurate*/ +let audioRecordStartTime + +/** Stores the maximum recording time in hours to stop recording once maximum recording hour has been reached */ +let maximumRecordingTimeInHours = 1 + +/** Stores the reference of the setInterval function that controls the timer in audio recording*/ +let elapsedTimeTimer + +/** Starts the audio recording*/ +function startAudioRecording() { + console.log('Recording Audio...') + + //If a previous audio recording is playing, pause it + let recorderAudioIsPlaying = !audioElement.paused // the paused property tells whether the media element is paused or not + console.log('paused?', !recorderAudioIsPlaying) + if (recorderAudioIsPlaying) { + audioElement.pause() + //also hide the audio playing indicator displayed on the screen + hideTextIndicatorOfAudioPlaying() + } + + //start recording using the audio recording API + audioRecorder + .start() + .then(() => { + //on success + + //store the recording start time to display the elapsed time according to it + audioRecordStartTime = new Date() + + //display control buttons to offer the functionality of stop and cancel + handleDisplayingRecordingControlButtons() + }) + .catch((error) => { + //on error + //No Browser Support Error + if (error.message.includes('mediaDevices API or getUserMedia method is not supported in this browser.')) { + console.log('To record audio, use browsers like Chrome and Firefox.') + displayBrowserNotSupportedOverlay() + } + + //Error handling structure + switch (error.name) { + case 'AbortError': //error from navigator.mediaDevices.getUserMedia + console.log('An AbortError has occurred.') + break + case 'NotAllowedError': //error from navigator.mediaDevices.getUserMedia + console.log('A NotAllowedError has occurred. 
User might have denied permission.') + break + case 'NotFoundError': //error from navigator.mediaDevices.getUserMedia + console.log('A NotFoundError has occurred.') + break + case 'NotReadableError': //error from navigator.mediaDevices.getUserMedia + console.log('A NotReadableError has occurred.') + break + case 'SecurityError': //error from navigator.mediaDevices.getUserMedia or from the MediaRecorder.start + console.log('A SecurityError has occurred.') + break + case 'TypeError': //error from navigator.mediaDevices.getUserMedia + console.log('A TypeError has occurred.') + break + case 'InvalidStateError': //error from the MediaRecorder.start + console.log('An InvalidStateError has occurred.') + break + case 'UnknownError': //error from the MediaRecorder.start + console.log('An UnknownError has occurred.') + break + default: + console.log('An error occurred with the error name ' + error.name) + } + }) +} +/** Stop the currently started audio recording & sends it + */ +function stopAudioRecording() { + console.log('Stopping Audio Recording...') + + //stop the recording using the audio recording API + audioRecorder + .stop() + .then((audioAsblob) => { + //Play recorder audio + playAudio(audioAsblob) + + //hide recording control button & return record icon + handleHidingRecordingControlButtons() + }) + .catch((error) => { + //Error handling structure + switch (error.name) { + case 'InvalidStateError': //error from the MediaRecorder.stop + console.log('An InvalidStateError has occurred.') + break + default: + console.log('An error occurred with the error name ' + error.name) + } + }) +} + +/** Cancel the currently started audio recording */ +function cancelAudioRecording() { + console.log('Canceling audio...') + + //cancel the recording using the audio recording API + audioRecorder.cancel() + + //hide recording control button & return record icon + handleHidingRecordingControlButtons() +} + +/** Plays recorded audio using the audio element in the HTML document + * @param {Blob} recorderAudioAsBlob - recorded audio as a Blob Object + */ +function playAudio(recorderAudioAsBlob) { + //read content of files (Blobs) asynchronously + let reader = new FileReader() + + //once content has been read + reader.onload = (e) => { + //store the base64 URL that represents the URL of the recording audio + let base64URL = e.target.result + + //If this is the first audio playing, create a source element + //as pre-populating the HTML with a source of empty src causes error + if (!audioElementSource) + //if it is not defined create it (happens first time only) + createSourceForAudioElement() + + //set the audio element's source using the base64 URL + audioElementSource.src = base64URL + + //set the type of the audio element based on the recorded audio's Blob type + let BlobType = recorderAudioAsBlob.type.includes(';') + ? 
recorderAudioAsBlob.type.substr(0, recorderAudioAsBlob.type.indexOf(';')) + : recorderAudioAsBlob.type + audioElementSource.type = BlobType + + //call the load method as it is used to update the audio element after changing the source or other settings + audioElement.load() + + //play the audio after successfully setting new src and type that corresponds to the recorded audio + console.log('Playing audio...') + audioElement.play() + + //Display text indicator of having the audio play in the background + displayTextIndicatorOfAudioPlaying() + } + + //read content and convert it to a URL (base64) + reader.readAsDataURL(recorderAudioAsBlob) +} + +/** Computes the elapsed recording time since the moment the function is called in the format h:m:s*/ +function handleElapsedRecordingTime() { + //display initial time when recording begins + displayElapsedTimeDuringAudioRecording('00:00') + + //create an interval that compute & displays elapsed time, as well as, animate red dot - every second + elapsedTimeTimer = setInterval(() => { + //compute the elapsed time every second + let elapsedTime = computeElapsedTime(audioRecordStartTime) //pass the actual record start time + //display the elapsed time + displayElapsedTimeDuringAudioRecording(elapsedTime) + }, 1000) //every second +} + +/** Display elapsed time during audio recording + * @param {String} elapsedTime - elapsed time in the format mm:ss or hh:mm:ss + */ +function displayElapsedTimeDuringAudioRecording(elapsedTime) { + //1. display the passed elapsed time as the elapsed time in the elapsedTime HTML element + elapsedTimeTag.innerHTML = elapsedTime + + //2. Stop the recording when the max number of hours is reached + if (elapsedTimeReachedMaximumNumberOfHours(elapsedTime)) { + stopAudioRecording() + } +} + +/** + * @param {String} elapsedTime - elapsed time in the format mm:ss or hh:mm:ss + * @returns {Boolean} whether the elapsed time reached the maximum number of hours or not + */ +function elapsedTimeReachedMaximumNumberOfHours(elapsedTime) { + //Split the elapsed time by the symbol that separates the hours, minutes and seconds : + let elapsedTimeSplit = elapsedTime.split(':') + + //Turn the maximum recording time in hours to a string and pad it with zero if less than 10 + let maximumRecordingTimeInHoursAsString = + maximumRecordingTimeInHours < 10 ? '0' + maximumRecordingTimeInHours : maximumRecordingTimeInHours.toString() + + //if the elapsed time reach hours and also reach the maximum recording time in hours return true + if (elapsedTimeSplit.length === 3 && elapsedTimeSplit[0] === maximumRecordingTimeInHoursAsString) return true + //otherwise, return false + else return false +} + +/** Computes the elapsedTime since the moment the function is called in the format mm:ss or hh:mm:ss + * @param {String} startTime - start time to compute the elapsed time since + * @returns {String} elapsed time in mm:ss format or hh:mm:ss format, if elapsed hours are 0. + */ +function computeElapsedTime(startTime) { + //record end time + let endTime = new Date() + + //time difference in ms + let timeDiff = endTime - startTime + + //convert time difference from ms to seconds + timeDiff = timeDiff / 1000 + + //extract integer seconds that don't form a minute using % + let seconds = Math.floor(timeDiff % 60) //ignoring incomplete seconds (floor) + + //pad seconds with a zero if necessary + seconds = seconds < 10 ? 
'0' + seconds : seconds + + //convert time difference from seconds to minutes using % + timeDiff = Math.floor(timeDiff / 60) + + //extract integer minutes that don't form an hour using % + let minutes = timeDiff % 60 //no need to floor possible incomplete minutes, because they've been handled as seconds + minutes = minutes < 10 ? '0' + minutes : minutes + + //convert time difference from minutes to hours + timeDiff = Math.floor(timeDiff / 60) + + //extract integer hours that don't form a day using % + let hours = timeDiff % 24 //no need to floor possible incomplete hours, because they've been handled as seconds + + //convert time difference from hours to days + timeDiff = Math.floor(timeDiff / 24) + + // the rest of timeDiff is number of days + let days = timeDiff //add days to hours + + let totalHours = hours + days * 24 + totalHours = totalHours < 10 ? '0' + totalHours : totalHours + + if (totalHours === '00') { + return minutes + ':' + seconds + } else { + return totalHours + ':' + minutes + ':' + seconds + } +} + +//API to handle audio recording + +const audioRecorder = { + /** Stores the recorded audio as Blob objects of audio data as the recording continues*/ + audioBlobs: [] /*of type Blob[]*/, + /** Stores the reference of the MediaRecorder instance that handles the MediaStream when recording starts*/ + mediaRecorder: null /*of type MediaRecorder*/, + /** Stores the reference to the stream currently capturing the audio*/ + streamBeingCaptured: null /*of type MediaStream*/, + /** Start recording the audio + * @returns {Promise} - returns a promise that resolves if audio recording successfully started + */ + start: function () { + //Feature Detection + if (!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)) { + //Feature is not supported in browser + //return a custom error + return Promise.reject(new Error('mediaDevices API or getUserMedia method is not supported in this browser.')) + } else { + //Feature is supported in browser + + //create an audio stream + return ( + navigator.mediaDevices + .getUserMedia({ audio: true } /*of type MediaStreamConstraints*/) + //returns a promise that resolves to the audio stream + .then((stream) /*of type MediaStream*/ => { + //save the reference of the stream to be able to stop it when necessary + audioRecorder.streamBeingCaptured = stream + + //create a media recorder instance by passing that stream into the MediaRecorder constructor + audioRecorder.mediaRecorder = new MediaRecorder(stream) /*the MediaRecorder interface of the MediaStream Recording + API provides functionality to easily record media*/ + + //clear previously saved audio Blobs, if any + audioRecorder.audioBlobs = [] + + //add a dataavailable event listener in order to store the audio data Blobs when recording + audioRecorder.mediaRecorder.addEventListener('dataavailable', (event) => { + //store audio Blob object + audioRecorder.audioBlobs.push(event.data) + }) + + //start the recording by calling the start method on the media recorder + audioRecorder.mediaRecorder.start() + }) + ) + + /* errors are not handled in the API because if its handled and the promise is chained, the .then after the catch will be executed*/ + } + }, + /** Stop the started audio recording + * @returns {Promise} - returns a promise that resolves to the audio as a blob file + */ + stop: function () { + //return a promise that would return the blob or URL of the recording + return new Promise((resolve) => { + //save audio type to pass to set the Blob type + let mimeType = 
audioRecorder.mediaRecorder.mimeType + + //listen to the stop event in order to create & return a single Blob object + audioRecorder.mediaRecorder.addEventListener('stop', () => { + //create a single blob object, as we might have gathered a few Blob objects that needs to be joined as one + let audioBlob = new Blob(audioRecorder.audioBlobs, { type: mimeType }) + + //resolve promise with the single audio blob representing the recorded audio + resolve(audioBlob) + }) + audioRecorder.cancel() + }) + }, + /** Cancel audio recording*/ + cancel: function () { + //stop the recording feature + audioRecorder.mediaRecorder.stop() + + //stop all the tracks on the active stream in order to stop the stream + audioRecorder.stopStream() + + //reset API properties for next recording + audioRecorder.resetRecordingProperties() + }, + /** Stop all the tracks on the active stream in order to stop the stream and remove + * the red flashing dot showing in the tab + */ + stopStream: function () { + //stopping the capturing request by stopping all the tracks on the active stream + audioRecorder.streamBeingCaptured + .getTracks() //get all tracks from the stream + .forEach((track) /*of type MediaStreamTrack*/ => track.stop()) //stop each one + }, + /** Reset all the recording properties including the media recorder and stream being captured*/ + resetRecordingProperties: function () { + audioRecorder.mediaRecorder = null + audioRecorder.streamBeingCaptured = null + + /*No need to remove event listeners attached to mediaRecorder as + If a DOM element which is removed is reference-free (no references pointing to it), the element itself is picked + up by the garbage collector as well as any event handlers/listeners associated with it. + getEventListeners(audioRecorder.mediaRecorder) will return an empty array of events.*/ + } +} From 826de70c6c73560b88dce0d7c070fc0947280aeb Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Fri, 15 Dec 2023 13:21:10 +0530 Subject: [PATCH 09/62] MultiModal: addition of live recording... 
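The diff below removes the recorder code that was appended to ChatMessage.js and instead consumes audio-recording.js through three imported functions, with React state setters passed in as callbacks. Judging only from the call sites in this patch, the module surface is approximately the following sketch (signatures inferred from usage, not copied from the file); underneath it is still the same getUserMedia/MediaRecorder flow introduced in the previous patch:

// Inferred surface of audio-recording.js as consumed by ChatMessage.js below.
export declare function startAudioRecording(
    setIsRecording: (recording: boolean) => void,
    setRecordingNotSupported: (notSupported: boolean) => void
): void
export declare function stopAudioRecording(addRecordingToPreviews: (blob: Blob) => void): void
export declare function cancelAudioRecording(): void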
--- .../ui/src/views/chatmessage/ChatMessage.js | 579 +++--------------- .../src/views/chatmessage/audio-recording.css | 3 + .../src/views/chatmessage/audio-recording.js | 167 +---- 3 files changed, 118 insertions(+), 631 deletions(-) diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index d7bbaf9e..7e0092cd 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -26,16 +26,7 @@ import { Typography } from '@mui/material' import { useTheme } from '@mui/material/styles' -import { - IconDownload, - IconSend, - IconUpload, - IconMicrophone, - IconPhotoPlus, - IconPlayerStop, - IconPlayerRecord, - IconCircleDot -} from '@tabler/icons' +import { IconDownload, IconSend, IconMicrophone, IconPhotoPlus, IconCircleDot } from '@tabler/icons' // project import import { CodeBlock } from 'ui-component/markdown/CodeBlock' @@ -59,6 +50,7 @@ import robotPNG from 'assets/images/robot.png' import userPNG from 'assets/images/account.png' import { isValidURL, removeDuplicateURL, setLocalStorageChatflow } from 'utils/genericHelper' import DeleteIcon from '@mui/icons-material/Delete' +import { cancelAudioRecording, startAudioRecording, stopAudioRecording } from './audio-recording' export const ChatMessage = ({ open, chatflowid, isDialog }) => { const theme = useTheme() @@ -84,11 +76,17 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { const getChatmessageApi = useApi(chatmessageApi.getInternalChatmessageFromChatflow) const getIsChatflowStreamingApi = useApi(chatflowsApi.getIsChatflowStreaming) + // drag & drop and file input const fileUploadRef = useRef(null) const getAllowChatFlowUploads = useApi(chatflowsApi.getAllowChatflowUploads) const [isChatFlowAvailableForUploads, setIsChatFlowAvailableForUploads] = useState(false) const [previews, setPreviews] = useState([]) const [isDragOver, setIsDragOver] = useState(false) + + // recording + const [isRecording, setIsRecording] = useState(false) + const [recordingNotSupported, setRecordingNotSupported] = useState(false) + const handleDragOver = (e) => { if (!isChatFlowAvailableForUploads) { return @@ -227,6 +225,24 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { event.target.value = null } + const addRecordingToPreviews = (blob) => { + const mimeType = blob.type.substring(0, blob.type.indexOf(';')) + // read blob and add to previews + const reader = new FileReader() + reader.readAsDataURL(blob) + reader.onloadend = () => { + const base64data = reader.result + const upload = { + data: base64data, + preview: audioUploadSVG, + type: 'audio', + name: 'audio.wav', + mime: mimeType + } + setPreviews((prevPreviews) => [...prevPreviews, upload]) + } + } + const handleDragEnter = (e) => { if (isChatFlowAvailableForUploads) { e.preventDefault() @@ -271,6 +287,21 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { setPreviews([]) } + const onMicrophonePressed = () => { + setIsRecording(true) + startAudioRecording(setIsRecording, setRecordingNotSupported) + } + const onRecordingCancelled = () => { + cancelAudioRecording() + setIsRecording(false) + setRecordingNotSupported(false) + } + const onRecordingStopped = () => { + stopAudioRecording(addRecordingToPreviews) + setIsRecording(false) + setRecordingNotSupported(false) + } + const onSourceDialogClick = (data, title) => { setSourceDialogProps({ data, title }) setSourceDialogOpen(true) @@ -487,8 +518,7 @@ export const ChatMessage = ({ open, chatflowid, 
isDialog }) => { getIsChatflowStreamingApi.request(chatflowid) getAllowChatFlowUploads.request(chatflowid) scrollToBottom() - initAudioRecording() - + setIsRecording(false) socket = socketIOClient(baseURL) socket.on('connect', () => { @@ -530,39 +560,6 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { onDrop={handleDrop} className={`file-drop-field`} > -
-
- -
- -

-
- -
-
-

- Audio is playing. - . - . -

-
-
-
-
-

To record audio, use browsers like Chrome and Firefox that support audio recording.

- -
-
- {/* eslint-disable-next-line jsx-a11y/media-has-caption */} - {isDragOver && getAllowChatFlowUploads.data?.allowUploads && ( Drop here to upload @@ -576,6 +573,41 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { })} )} + {isRecording && ( + +
+ Recording +
+ + + +
+ + + +

00:00

+
+ + + +
+
+ {recordingNotSupported && ( +
+
+

To record audio, use browsers like Chrome and Firefox that support audio recording.

+ +
+
+ )} +
+ )}
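The elapsed-time element in the recording bar above is the one computeElapsedTime (defined in the previous patch) writes into; a worked trace for a recording of 3,725,000 ms (1 h 2 m 5 s) makes the repeated divide-and-modulo steps concrete:

// Worked trace of computeElapsedTime(startTime) once 3,725,000 ms have elapsed:
//   timeDiff = 3725000 / 1000   = 3725 seconds
//   seconds  = floor(3725 % 60) = 5   -> padded to '05'
//   timeDiff = floor(3725 / 60) = 62
//   minutes  = 62 % 60          = 2   -> padded to '02'
//   timeDiff = floor(62 / 60)   = 1
//   hours    = 1 % 24 = 1, days = 0, so totalHours -> '01'
//   returns '01:02:05' (the mm:ss form is used only when totalHours === '00')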
{messages && @@ -804,9 +836,15 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { endAdornment={ <> {isChatFlowAvailableForUploads && ( - - + + onMicrophonePressed()} + type='button' + disabled={loading || !chatflowid} + edge='end' + > {
- setSourceDialogOpen(false)} /> ) @@ -850,449 +887,3 @@ ChatMessage.propTypes = { chatflowid: PropTypes.string, isDialog: PropTypes.bool } - -// audio-recording.js --------------- -//View -let microphoneButton = document.getElementsByClassName('start-recording-button')[0] -let recordingControlButtonsContainer = document.getElementsByClassName('recording-control-buttons-container')[0] -let stopRecordingButton = document.getElementsByClassName('stop-recording-button')[0] -let cancelRecordingButton = document.getElementsByClassName('cancel-recording-button')[0] -let elapsedTimeTag = document.getElementsByClassName('elapsed-time')[0] -let closeBrowserNotSupportedBoxButton = document.getElementsByClassName('close-browser-not-supported-box')[0] -let overlay = document.getElementsByClassName('overlay')[0] -let audioElement = document.getElementsByClassName('audio-element')[0] -let audioElementSource = audioElement?.getElementsByTagName('source')[0] -let textIndicatorOfAudiPlaying = document.getElementsByClassName('text-indication-of-audio-playing')[0] - -const initAudioRecording = () => { - microphoneButton = document.getElementsByClassName('start-recording-button')[0] - recordingControlButtonsContainer = document.getElementsByClassName('recording-control-buttons-container')[0] - stopRecordingButton = document.getElementsByClassName('stop-recording-button')[0] - cancelRecordingButton = document.getElementsByClassName('cancel-recording-button')[0] - elapsedTimeTag = document.getElementsByClassName('elapsed-time')[0] - closeBrowserNotSupportedBoxButton = document.getElementsByClassName('close-browser-not-supported-box')[0] - overlay = document.getElementsByClassName('overlay')[0] - audioElement = document.getElementsByClassName('audio-element')[0] - audioElementSource = audioElement?.getElementsByTagName('source')[0] - textIndicatorOfAudiPlaying = document.getElementsByClassName('text-indication-of-audio-playing')[0] - //Listeners - - //Listen to start recording button - if (microphoneButton) microphoneButton.onclick = startAudioRecording - - //Listen to stop recording button - if (stopRecordingButton) stopRecordingButton.onclick = stopAudioRecording - - //Listen to cancel recording button - if (cancelRecordingButton) cancelRecordingButton.onclick = cancelAudioRecording - - //Listen to when the ok button is clicked in the browser not supporting audio recording box - if (closeBrowserNotSupportedBoxButton) closeBrowserNotSupportedBoxButton.onclick = hideBrowserNotSupportedOverlay - - //Listen to when the audio being played ends - if (audioElement) audioElement.onended = hideTextIndicatorOfAudioPlaying -} - -/** Displays recording control buttons */ -function handleDisplayingRecordingControlButtons() { - //Hide the microphone button that starts audio recording - microphoneButton.style.display = 'none' - - //Display the recording control buttons - recordingControlButtonsContainer.classList.remove('hide') - - //Handle the displaying of the elapsed recording time - handleElapsedRecordingTime() -} - -/** Hide the displayed recording control buttons */ -function handleHidingRecordingControlButtons() { - //Display the microphone button that starts audio recording - microphoneButton.style.display = 'block' - - //Hide the recording control buttons - recordingControlButtonsContainer.classList.add('hide') - - //stop interval that handles both time elapsed and the red dot - clearInterval(elapsedTimeTimer) -} - -/** Displays browser not supported info box for the user*/ -function 
displayBrowserNotSupportedOverlay() { - overlay.classList.remove('hide') -} - -/** Displays browser not supported info box for the user*/ -function hideBrowserNotSupportedOverlay() { - overlay.classList.add('hide') -} - -/** Creates a source element for the audio element in the HTML document*/ -function createSourceForAudioElement() { - let sourceElement = document.createElement('source') - audioElement.appendChild(sourceElement) - - audioElementSource = sourceElement -} - -/** Display the text indicator of the audio being playing in the background */ -function displayTextIndicatorOfAudioPlaying() { - textIndicatorOfAudiPlaying.classList.remove('hide') -} - -/** Hide the text indicator of the audio being playing in the background */ -function hideTextIndicatorOfAudioPlaying() { - textIndicatorOfAudiPlaying.classList.add('hide') -} - -//Controller - -/** Stores the actual start time when an audio recording begins to take place to ensure elapsed time start time is accurate*/ -let audioRecordStartTime - -/** Stores the maximum recording time in hours to stop recording once maximum recording hour has been reached */ -let maximumRecordingTimeInHours = 1 - -/** Stores the reference of the setInterval function that controls the timer in audio recording*/ -let elapsedTimeTimer - -/** Starts the audio recording*/ -function startAudioRecording() { - console.log('Recording Audio...') - - //If a previous audio recording is playing, pause it - let recorderAudioIsPlaying = !audioElement.paused // the paused property tells whether the media element is paused or not - console.log('paused?', !recorderAudioIsPlaying) - if (recorderAudioIsPlaying) { - audioElement.pause() - //also hide the audio playing indicator displayed on the screen - hideTextIndicatorOfAudioPlaying() - } - - //start recording using the audio recording API - audioRecorder - .start() - .then(() => { - //on success - - //store the recording start time to display the elapsed time according to it - audioRecordStartTime = new Date() - - //display control buttons to offer the functionality of stop and cancel - handleDisplayingRecordingControlButtons() - }) - .catch((error) => { - //on error - //No Browser Support Error - if (error.message.includes('mediaDevices API or getUserMedia method is not supported in this browser.')) { - console.log('To record audio, use browsers like Chrome and Firefox.') - displayBrowserNotSupportedOverlay() - } - - //Error handling structure - switch (error.name) { - case 'AbortError': //error from navigator.mediaDevices.getUserMedia - console.log('An AbortError has occurred.') - break - case 'NotAllowedError': //error from navigator.mediaDevices.getUserMedia - console.log('A NotAllowedError has occurred. 
User might have denied permission.') - break - case 'NotFoundError': //error from navigator.mediaDevices.getUserMedia - console.log('A NotFoundError has occurred.') - break - case 'NotReadableError': //error from navigator.mediaDevices.getUserMedia - console.log('A NotReadableError has occurred.') - break - case 'SecurityError': //error from navigator.mediaDevices.getUserMedia or from the MediaRecorder.start - console.log('A SecurityError has occurred.') - break - case 'TypeError': //error from navigator.mediaDevices.getUserMedia - console.log('A TypeError has occurred.') - break - case 'InvalidStateError': //error from the MediaRecorder.start - console.log('An InvalidStateError has occurred.') - break - case 'UnknownError': //error from the MediaRecorder.start - console.log('An UnknownError has occurred.') - break - default: - console.log('An error occurred with the error name ' + error.name) - } - }) -} -/** Stop the currently started audio recording & sends it - */ -function stopAudioRecording() { - console.log('Stopping Audio Recording...') - - //stop the recording using the audio recording API - audioRecorder - .stop() - .then((audioAsblob) => { - //Play recorder audio - playAudio(audioAsblob) - - //hide recording control button & return record icon - handleHidingRecordingControlButtons() - }) - .catch((error) => { - //Error handling structure - switch (error.name) { - case 'InvalidStateError': //error from the MediaRecorder.stop - console.log('An InvalidStateError has occurred.') - break - default: - console.log('An error occurred with the error name ' + error.name) - } - }) -} - -/** Cancel the currently started audio recording */ -function cancelAudioRecording() { - console.log('Canceling audio...') - - //cancel the recording using the audio recording API - audioRecorder.cancel() - - //hide recording control button & return record icon - handleHidingRecordingControlButtons() -} - -/** Plays recorded audio using the audio element in the HTML document - * @param {Blob} recorderAudioAsBlob - recorded audio as a Blob Object - */ -function playAudio(recorderAudioAsBlob) { - //read content of files (Blobs) asynchronously - let reader = new FileReader() - - //once content has been read - reader.onload = (e) => { - //store the base64 URL that represents the URL of the recording audio - let base64URL = e.target.result - - //If this is the first audio playing, create a source element - //as pre-populating the HTML with a source of empty src causes error - if (!audioElementSource) - //if it is not defined create it (happens first time only) - createSourceForAudioElement() - - //set the audio element's source using the base64 URL - audioElementSource.src = base64URL - - //set the type of the audio element based on the recorded audio's Blob type - let BlobType = recorderAudioAsBlob.type.includes(';') - ? 
recorderAudioAsBlob.type.substr(0, recorderAudioAsBlob.type.indexOf(';')) - : recorderAudioAsBlob.type - audioElementSource.type = BlobType - - //call the load method as it is used to update the audio element after changing the source or other settings - audioElement.load() - - //play the audio after successfully setting new src and type that corresponds to the recorded audio - console.log('Playing audio...') - audioElement.play() - - //Display text indicator of having the audio play in the background - displayTextIndicatorOfAudioPlaying() - } - - //read content and convert it to a URL (base64) - reader.readAsDataURL(recorderAudioAsBlob) -} - -/** Computes the elapsed recording time since the moment the function is called in the format h:m:s*/ -function handleElapsedRecordingTime() { - //display initial time when recording begins - displayElapsedTimeDuringAudioRecording('00:00') - - //create an interval that compute & displays elapsed time, as well as, animate red dot - every second - elapsedTimeTimer = setInterval(() => { - //compute the elapsed time every second - let elapsedTime = computeElapsedTime(audioRecordStartTime) //pass the actual record start time - //display the elapsed time - displayElapsedTimeDuringAudioRecording(elapsedTime) - }, 1000) //every second -} - -/** Display elapsed time during audio recording - * @param {String} elapsedTime - elapsed time in the format mm:ss or hh:mm:ss - */ -function displayElapsedTimeDuringAudioRecording(elapsedTime) { - //1. display the passed elapsed time as the elapsed time in the elapsedTime HTML element - elapsedTimeTag.innerHTML = elapsedTime - - //2. Stop the recording when the max number of hours is reached - if (elapsedTimeReachedMaximumNumberOfHours(elapsedTime)) { - stopAudioRecording() - } -} - -/** - * @param {String} elapsedTime - elapsed time in the format mm:ss or hh:mm:ss - * @returns {Boolean} whether the elapsed time reached the maximum number of hours or not - */ -function elapsedTimeReachedMaximumNumberOfHours(elapsedTime) { - //Split the elapsed time by the symbol that separates the hours, minutes and seconds : - let elapsedTimeSplit = elapsedTime.split(':') - - //Turn the maximum recording time in hours to a string and pad it with zero if less than 10 - let maximumRecordingTimeInHoursAsString = - maximumRecordingTimeInHours < 10 ? '0' + maximumRecordingTimeInHours : maximumRecordingTimeInHours.toString() - - //if the elapsed time reach hours and also reach the maximum recording time in hours return true - if (elapsedTimeSplit.length === 3 && elapsedTimeSplit[0] === maximumRecordingTimeInHoursAsString) return true - //otherwise, return false - else return false -} - -/** Computes the elapsedTime since the moment the function is called in the format mm:ss or hh:mm:ss - * @param {String} startTime - start time to compute the elapsed time since - * @returns {String} elapsed time in mm:ss format or hh:mm:ss format, if elapsed hours are 0. - */ -function computeElapsedTime(startTime) { - //record end time - let endTime = new Date() - - //time difference in ms - let timeDiff = endTime - startTime - - //convert time difference from ms to seconds - timeDiff = timeDiff / 1000 - - //extract integer seconds that don't form a minute using % - let seconds = Math.floor(timeDiff % 60) //ignoring incomplete seconds (floor) - - //pad seconds with a zero if necessary - seconds = seconds < 10 ? 
'0' + seconds : seconds - - //convert time difference from seconds to minutes using % - timeDiff = Math.floor(timeDiff / 60) - - //extract integer minutes that don't form an hour using % - let minutes = timeDiff % 60 //no need to floor possible incomplete minutes, because they've been handled as seconds - minutes = minutes < 10 ? '0' + minutes : minutes - - //convert time difference from minutes to hours - timeDiff = Math.floor(timeDiff / 60) - - //extract integer hours that don't form a day using % - let hours = timeDiff % 24 //no need to floor possible incomplete hours, because they've been handled as seconds - - //convert time difference from hours to days - timeDiff = Math.floor(timeDiff / 24) - - // the rest of timeDiff is number of days - let days = timeDiff //add days to hours - - let totalHours = hours + days * 24 - totalHours = totalHours < 10 ? '0' + totalHours : totalHours - - if (totalHours === '00') { - return minutes + ':' + seconds - } else { - return totalHours + ':' + minutes + ':' + seconds - } -} - -//API to handle audio recording - -const audioRecorder = { - /** Stores the recorded audio as Blob objects of audio data as the recording continues*/ - audioBlobs: [] /*of type Blob[]*/, - /** Stores the reference of the MediaRecorder instance that handles the MediaStream when recording starts*/ - mediaRecorder: null /*of type MediaRecorder*/, - /** Stores the reference to the stream currently capturing the audio*/ - streamBeingCaptured: null /*of type MediaStream*/, - /** Start recording the audio - * @returns {Promise} - returns a promise that resolves if audio recording successfully started - */ - start: function () { - //Feature Detection - if (!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)) { - //Feature is not supported in browser - //return a custom error - return Promise.reject(new Error('mediaDevices API or getUserMedia method is not supported in this browser.')) - } else { - //Feature is supported in browser - - //create an audio stream - return ( - navigator.mediaDevices - .getUserMedia({ audio: true } /*of type MediaStreamConstraints*/) - //returns a promise that resolves to the audio stream - .then((stream) /*of type MediaStream*/ => { - //save the reference of the stream to be able to stop it when necessary - audioRecorder.streamBeingCaptured = stream - - //create a media recorder instance by passing that stream into the MediaRecorder constructor - audioRecorder.mediaRecorder = new MediaRecorder(stream) /*the MediaRecorder interface of the MediaStream Recording - API provides functionality to easily record media*/ - - //clear previously saved audio Blobs, if any - audioRecorder.audioBlobs = [] - - //add a dataavailable event listener in order to store the audio data Blobs when recording - audioRecorder.mediaRecorder.addEventListener('dataavailable', (event) => { - //store audio Blob object - audioRecorder.audioBlobs.push(event.data) - }) - - //start the recording by calling the start method on the media recorder - audioRecorder.mediaRecorder.start() - }) - ) - - /* errors are not handled in the API because if its handled and the promise is chained, the .then after the catch will be executed*/ - } - }, - /** Stop the started audio recording - * @returns {Promise} - returns a promise that resolves to the audio as a blob file - */ - stop: function () { - //return a promise that would return the blob or URL of the recording - return new Promise((resolve) => { - //save audio type to pass to set the Blob type - let mimeType = 
audioRecorder.mediaRecorder.mimeType - - //listen to the stop event in order to create & return a single Blob object - audioRecorder.mediaRecorder.addEventListener('stop', () => { - //create a single blob object, as we might have gathered a few Blob objects that needs to be joined as one - let audioBlob = new Blob(audioRecorder.audioBlobs, { type: mimeType }) - - //resolve promise with the single audio blob representing the recorded audio - resolve(audioBlob) - }) - audioRecorder.cancel() - }) - }, - /** Cancel audio recording*/ - cancel: function () { - //stop the recording feature - audioRecorder.mediaRecorder.stop() - - //stop all the tracks on the active stream in order to stop the stream - audioRecorder.stopStream() - - //reset API properties for next recording - audioRecorder.resetRecordingProperties() - }, - /** Stop all the tracks on the active stream in order to stop the stream and remove - * the red flashing dot showing in the tab - */ - stopStream: function () { - //stopping the capturing request by stopping all the tracks on the active stream - audioRecorder.streamBeingCaptured - .getTracks() //get all tracks from the stream - .forEach((track) /*of type MediaStreamTrack*/ => track.stop()) //stop each one - }, - /** Reset all the recording properties including the media recorder and stream being captured*/ - resetRecordingProperties: function () { - audioRecorder.mediaRecorder = null - audioRecorder.streamBeingCaptured = null - - /*No need to remove event listeners attached to mediaRecorder as - If a DOM element which is removed is reference-free (no references pointing to it), the element itself is picked - up by the garbage collector as well as any event handlers/listeners associated with it. - getEventListeners(audioRecorder.mediaRecorder) will return an empty array of events.*/ - } -} diff --git a/packages/ui/src/views/chatmessage/audio-recording.css b/packages/ui/src/views/chatmessage/audio-recording.css index 5ba0fa50..fbca2f60 100644 --- a/packages/ui/src/views/chatmessage/audio-recording.css +++ b/packages/ui/src/views/chatmessage/audio-recording.css @@ -20,6 +20,7 @@ justify-content: center; /*horizontal centering*/ align-items: center; + background-color: white; } .start-recording-button { font-size: 70px; @@ -40,6 +41,7 @@ align-items: center; width: 334px; margin-bottom: 30px; + background-color: white; } .cancel-recording-button, .stop-recording-button { @@ -61,6 +63,7 @@ color: #27a527; } .recording-elapsed-time { + font-size: 32px; /*targeting Chrome & Safari*/ display: -webkit-flex; /*targeting IE10*/ diff --git a/packages/ui/src/views/chatmessage/audio-recording.js b/packages/ui/src/views/chatmessage/audio-recording.js index 395443fe..f5cba001 100644 --- a/packages/ui/src/views/chatmessage/audio-recording.js +++ b/packages/ui/src/views/chatmessage/audio-recording.js @@ -1,41 +1,21 @@ +/** + * @fileoverview This file contains the API to handle audio recording. 
+ * Originally from 'https://ralzohairi.medium.com/audio-recording-in-javascript-96eed45b75ee' + */ + // audio-recording.js --------------- -//View -let microphoneButton = document.getElementsByClassName('start-recording-button')[0] -let recordingControlButtonsContainer = document.getElementsByClassName('recording-control-buttons-container')[0] -let stopRecordingButton = document.getElementsByClassName('stop-recording-button')[0] -let cancelRecordingButton = document.getElementsByClassName('cancel-recording-button')[0] -let elapsedTimeTag = document.getElementsByClassName('elapsed-time')[0] -let closeBrowserNotSupportedBoxButton = document.getElementsByClassName('close-browser-not-supported-box')[0] -let overlay = document.getElementsByClassName('overlay')[0] -let audioElement = document.getElementsByClassName('audio-element')[0] -let audioElementSource = document.getElementsByClassName('audio-element')[0].getElementsByTagName('source')[0] -let textIndicatorOfAudiPlaying = document.getElementsByClassName('text-indication-of-audio-playing')[0] +let microphoneButton, elapsedTimeTag -//Listeners - -//Listen to start recording button -microphoneButton.onclick = startAudioRecording - -//Listen to stop recording button -stopRecordingButton.onclick = stopAudioRecording - -//Listen to cancel recording button -cancelRecordingButton.onclick = cancelAudioRecording - -//Listen to when the ok button is clicked in the browser not supporting audio recording box -closeBrowserNotSupportedBoxButton.onclick = hideBrowserNotSupportedOverlay - -//Listen to when the audio being played ends -audioElement.onended = hideTextIndicatorOfAudioPlaying +/** Initialize controls */ +function initializeControls() { + microphoneButton = document.getElementsByClassName('start-recording-button')[0] +} /** Displays recording control buttons */ function handleDisplayingRecordingControlButtons() { //Hide the microphone button that starts audio recording microphoneButton.style.display = 'none' - //Display the recording control buttons - recordingControlButtonsContainer.classList.remove('hide') - //Handle the displaying of the elapsed recording time handleElapsedRecordingTime() } @@ -45,43 +25,10 @@ function handleHidingRecordingControlButtons() { //Display the microphone button that starts audio recording microphoneButton.style.display = 'block' - //Hide the recording control buttons - recordingControlButtonsContainer.classList.add('hide') - //stop interval that handles both time elapsed and the red dot clearInterval(elapsedTimeTimer) } -/** Displays browser not supported info box for the user*/ -function displayBrowserNotSupportedOverlay() { - overlay.classList.remove('hide') -} - -/** Displays browser not supported info box for the user*/ -function hideBrowserNotSupportedOverlay() { - overlay.classList.add('hide') -} - -/** Creates a source element for the audio element in the HTML document*/ -function createSourceForAudioElement() { - let sourceElement = document.createElement('source') - audioElement.appendChild(sourceElement) - - audioElementSource = sourceElement -} - -/** Display the text indicator of the audio being playing in the background */ -function displayTextIndicatorOfAudioPlaying() { - textIndicatorOfAudiPlaying.classList.remove('hide') -} - -/** Hide the text indicator of the audio being playing in the background */ -function hideTextIndicatorOfAudioPlaying() { - textIndicatorOfAudiPlaying.classList.add('hide') -} - -//Controller - /** Stores the actual start time when an audio recording begins to take place to 
ensure elapsed time start time is accurate*/ let audioRecordStartTime @@ -92,24 +39,17 @@ let maximumRecordingTimeInHours = 1 let elapsedTimeTimer /** Starts the audio recording*/ -function startAudioRecording() { - console.log('Recording Audio...') - - //If a previous audio recording is playing, pause it - let recorderAudioIsPlaying = !audioElement.paused // the paused property tells whether the media element is paused or not - console.log('paused?', !recorderAudioIsPlaying) - if (recorderAudioIsPlaying) { - audioElement.pause() - //also hide the audio playing indicator displayed on the screen - hideTextIndicatorOfAudioPlaying() - } +export function startAudioRecording(onRecordingStart, onUnsupportedBrowser) { + initializeControls() //start recording using the audio recording API audioRecorder .start() .then(() => { - //on success - + //on success show the controls to stop and cancel the recording + if (onRecordingStart) { + onRecordingStart(true) + } //store the recording start time to display the elapsed time according to it audioRecordStartTime = new Date() @@ -120,8 +60,9 @@ function startAudioRecording() { //on error //No Browser Support Error if (error.message.includes('mediaDevices API or getUserMedia method is not supported in this browser.')) { - console.log('To record audio, use browsers like Chrome and Firefox.') - displayBrowserNotSupportedOverlay() + if (onUnsupportedBrowser) { + onUnsupportedBrowser(true) + } } //Error handling structure @@ -157,18 +98,16 @@ function startAudioRecording() { } /** Stop the currently started audio recording & sends it */ -function stopAudioRecording() { - console.log('Stopping Audio Recording...') - +export function stopAudioRecording(addRecordingToPreviews) { //stop the recording using the audio recording API audioRecorder .stop() - .then((audioAsblob) => { - //Play recorder audio - playAudio(audioAsblob) - + .then((audioBlob) => { //hide recording control button & return record icon handleHidingRecordingControlButtons() + if (addRecordingToPreviews) { + addRecordingToPreviews(audioBlob) + } }) .catch((error) => { //Error handling structure @@ -183,9 +122,7 @@ function stopAudioRecording() { } /** Cancel the currently started audio recording */ -function cancelAudioRecording() { - console.log('Canceling audio...') - +export function cancelAudioRecording() { //cancel the recording using the audio recording API audioRecorder.cancel() @@ -193,50 +130,9 @@ function cancelAudioRecording() { handleHidingRecordingControlButtons() } -/** Plays recorded audio using the audio element in the HTML document - * @param {Blob} recorderAudioAsBlob - recorded audio as a Blob Object - */ -function playAudio(recorderAudioAsBlob) { - //read content of files (Blobs) asynchronously - let reader = new FileReader() - - //once content has been read - reader.onload = (e) => { - //store the base64 URL that represents the URL of the recording audio - let base64URL = e.target.result - - //If this is the first audio playing, create a source element - //as pre-populating the HTML with a source of empty src causes error - if (!audioElementSource) - //if it is not defined create it (happens first time only) - createSourceForAudioElement() - - //set the audio element's source using the base64 URL - audioElementSource.src = base64URL - - //set the type of the audio element based on the recorded audio's Blob type - let BlobType = recorderAudioAsBlob.type.includes(';') - ? 
recorderAudioAsBlob.type.substr(0, recorderAudioAsBlob.type.indexOf(';')) - : recorderAudioAsBlob.type - audioElementSource.type = BlobType - - //call the load method as it is used to update the audio element after changing the source or other settings - audioElement.load() - - //play the audio after successfully setting new src and type that corresponds to the recorded audio - console.log('Playing audio...') - audioElement.play() - - //Display text indicator of having the audio play in the background - displayTextIndicatorOfAudioPlaying() - } - - //read content and convert it to a URL (base64) - reader.readAsDataURL(recorderAudioAsBlob) -} - /** Computes the elapsed recording time since the moment the function is called in the format h:m:s*/ function handleElapsedRecordingTime() { + elapsedTimeTag = document.getElementById('elapsed-time') //display initial time when recording begins displayElapsedTimeDuringAudioRecording('00:00') @@ -255,7 +151,6 @@ function handleElapsedRecordingTime() { function displayElapsedTimeDuringAudioRecording(elapsedTime) { //1. display the passed elapsed time as the elapsed time in the elapsedTime HTML element elapsedTimeTag.innerHTML = elapsedTime - //2. Stop the recording when the max number of hours is reached if (elapsedTimeReachedMaximumNumberOfHours(elapsedTime)) { stopAudioRecording() @@ -275,9 +170,7 @@ function elapsedTimeReachedMaximumNumberOfHours(elapsedTime) { maximumRecordingTimeInHours < 10 ? '0' + maximumRecordingTimeInHours : maximumRecordingTimeInHours.toString() //if the elapsed time reach hours and also reach the maximum recording time in hours return true - if (elapsedTimeSplit.length === 3 && elapsedTimeSplit[0] === maximumRecordingTimeInHoursAsString) return true - //otherwise, return false - else return false + return elapsedTimeSplit.length === 3 && elapsedTimeSplit[0] === maximumRecordingTimeInHoursAsString } /** Computes the elapsedTime since the moment the function is called in the format mm:ss or hh:mm:ss @@ -331,7 +224,7 @@ function computeElapsedTime(startTime) { //API to handle audio recording -const audioRecorder = { +export const audioRecorder = { /** Stores the recorded audio as Blob objects of audio data as the recording continues*/ audioBlobs: [] /*of type Blob[]*/, /** Stores the reference of the MediaRecorder instance that handles the MediaStream when recording starts*/ @@ -360,8 +253,8 @@ const audioRecorder = { audioRecorder.streamBeingCaptured = stream //create a media recorder instance by passing that stream into the MediaRecorder constructor - audioRecorder.mediaRecorder = new MediaRecorder(stream) /*the MediaRecorder interface of the MediaStream Recording - API provides functionality to easily record media*/ + audioRecorder.mediaRecorder = new MediaRecorder(stream) + /*the MediaRecorder interface of the MediaStream Recording API provides functionality to easily record media*/ //clear previously saved audio Blobs, if any audioRecorder.audioBlobs = [] From 398a31f4265e4cc9f450ff2caccf9ed097478673 Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 17 Jan 2024 00:39:14 +0000 Subject: [PATCH 10/62] UI touchup --- packages/server/src/Interface.ts | 5 + packages/server/src/index.ts | 87 +++--- packages/ui/src/api/chatflows.js | 1 + .../ui/src/ui-component/button/ImageButton.js | 57 ++++ .../ui-component/cards/StarterPromptsCard.css | 1 - .../ui-component/cards/StarterPromptsCard.js | 7 +- .../ui/src/views/chatmessage/ChatMessage.css | 32 +-- .../ui/src/views/chatmessage/ChatMessage.js | 268 ++++++++++-------- 
.../src/views/chatmessage/audio-recording.js | 11 + 9 files changed, 297 insertions(+), 172 deletions(-) create mode 100644 packages/ui/src/ui-component/button/ImageButton.js diff --git a/packages/server/src/Interface.ts b/packages/server/src/Interface.ts index 942fe490..a944e064 100644 --- a/packages/server/src/Interface.ts +++ b/packages/server/src/Interface.ts @@ -214,3 +214,8 @@ export interface ICredentialReqBody { export interface ICredentialReturnResponse extends ICredential { plainDataObj: ICredentialDataDecrypted } + +export interface IUploadFileSizeAndTypes { + fileTypes: string[] + maxUploadSize: number +} diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index f62e2c56..4451b838 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -21,7 +21,8 @@ import { chatType, IChatMessage, IDepthQueue, - INodeDirectedGraph + INodeDirectedGraph, + IUploadFileSizeAndTypes } from './Interface' import { getNodeModulesPackagePath, @@ -57,7 +58,7 @@ import { Tool } from './database/entities/Tool' import { Assistant } from './database/entities/Assistant' import { ChatflowPool } from './ChatflowPool' import { CachePool } from './CachePool' -import { ICommonObject, IMessage, INodeOptionsValue, handleEscapeCharacters } from 'flowise-components' +import { ICommonObject, IMessage, INodeOptionsValue, INodeParams, handleEscapeCharacters } from 'flowise-components' import { createRateLimiter, getRateLimiter, initializeRateLimiter } from './utils/rateLimit' import { addAPIKey, compareKeys, deleteAPIKey, getApiKey, getAPIKeys, updateAPIKey } from './utils/apiKey' import { sanitizeMiddleware } from './utils/XSS' @@ -147,7 +148,9 @@ export class App { '/api/v1/node-icon/', '/api/v1/components-credentials-icon/', '/api/v1/chatflows-streaming', + '/api/v1/chatflows-uploads', '/api/v1/openai-assistants-file', + '/api/v1/get-upload-file', '/api/v1/ip' ] this.app.use((req, res, next) => { @@ -464,8 +467,45 @@ export class App { }) if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) - const obj = this.shouldAllowUploads(chatflow) - return res.json(obj) + const uploadAllowedNodes = ['OpenAIMultiModalChain', 'OpenAIWhisper'] + + try { + const flowObj = JSON.parse(chatflow.flowData) + let isUploadAllowed = false + const allowances: IUploadFileSizeAndTypes[] = [] + + flowObj.nodes.forEach((node: IReactFlowNode) => { + if (uploadAllowedNodes.indexOf(node.data.type) > -1) { + logger.debug(`[server]: Found Eligible Node ${node.data.type}, Allowing Uploads.`) + isUploadAllowed = true + + const allowance: IUploadFileSizeAndTypes = { + fileTypes: [], + maxUploadSize: 0 + } + + node.data.inputParams.map((param: INodeParams) => { + if (param.name === 'allowedUploadTypes') { + allowance.fileTypes = (param.default as string).split(';') + } + if (param.name === 'maxUploadSize') { + allowance.maxUploadSize = parseInt(param.default ? 
(param.default as string) : '0') + } + }) + + if (allowance.fileTypes && allowance.maxUploadSize) { + allowances.push(allowance) + } + } + }) + + return res.json({ + isUploadAllowed, + uploadFileSizeAndTypes: allowances + }) + } catch (e) { + return res.status(500).send(e) + } }) // ---------------------------------------- @@ -1058,10 +1098,14 @@ export class App { return res.status(500).send(`Invalid file path`) } const filePath = path.join(getUserHome(), '.flowise', 'gptvision', req.query.chatId as string, req.params.id) - console.log(filePath) - if (!path.isAbsolute(filePath) || !fs.existsSync(filePath)) { + //raise error if file path is not absolute + if (!path.isAbsolute(filePath)) return res.status(500).send(`Invalid file path`) + //raise error if file path contains '..' + if (filePath.includes('..')) return res.status(500).send(`Invalid file path`) + //only return from the .flowise gptvision folder + if (!(filePath.includes('.flowise') && filePath.includes('gptvision') && filePath.includes(req.query.chatId as string))) return res.status(500).send(`Invalid file path`) - } + res.setHeader('Content-Disposition', 'attachment; filename=' + path.basename(filePath)) streamFileToUser(res, filePath) }) @@ -1350,35 +1394,6 @@ export class App { }) } - private uploadAllowedNodes = ['OpenAIMultiModalChain', 'OpenAIWhisper'] - private shouldAllowUploads(result: ChatFlow): any { - const flowObj = JSON.parse(result.flowData) - let allowUploads = false - const allowances: any = [] - flowObj.nodes.forEach((node: IReactFlowNode) => { - if (this.uploadAllowedNodes.indexOf(node.data.type) > -1) { - logger.debug(`[server]: Found Eligible Node ${node.data.type}, Allowing Uploads.`) - allowUploads = true - const allowance: any = {} - node.data.inputParams.map((param: any) => { - if (param.name === 'allowedUploadTypes') { - allowance.allowedTypes = param.default.split(';') - } - if (param.name === 'maxUploadSize') { - allowance.maxUploadSize = parseInt(param.default ? 
param.default : '0') - } - }) - if (allowance.allowedTypes && allowance.maxUploadSize) { - allowances.push(allowance) - } - } - }) - return { - allowUploads, - allowed: allowances - } - } - /** * Validate API Key * @param {Request} req diff --git a/packages/ui/src/api/chatflows.js b/packages/ui/src/api/chatflows.js index c02ca5cd..586fe183 100644 --- a/packages/ui/src/api/chatflows.js +++ b/packages/ui/src/api/chatflows.js @@ -13,6 +13,7 @@ const updateChatflow = (id, body) => client.put(`/chatflows/${id}`, body) const deleteChatflow = (id) => client.delete(`/chatflows/${id}`) const getIsChatflowStreaming = (id) => client.get(`/chatflows-streaming/${id}`) + const getAllowChatflowUploads = (id) => client.get(`/chatflows-uploads/${id}`) export default { diff --git a/packages/ui/src/ui-component/button/ImageButton.js b/packages/ui/src/ui-component/button/ImageButton.js new file mode 100644 index 00000000..7a10b966 --- /dev/null +++ b/packages/ui/src/ui-component/button/ImageButton.js @@ -0,0 +1,57 @@ +import { styled } from '@mui/material/styles' +import ButtonBase from '@mui/material/ButtonBase' + +export const ImageButton = styled(ButtonBase)(({ theme }) => ({ + position: 'relative', + height: 200, + borderRadius: '10px', + [theme.breakpoints.down('sm')]: { + width: '100% !important', // Overrides inline-style + height: 100 + }, + '&:hover, &.Mui-focusVisible': { + zIndex: 1, + '& .MuiImageBackdrop-root': { + opacity: 0.4 + }, + '& .MuiImageMarked-root': { + opacity: 1 + }, + '& .MuiTypography-root': { + border: '4px solid currentColor' + } + } +})) + +export const ImageSrc = styled('span')({ + position: 'absolute', + borderRadius: '10px', + left: 0, + right: 0, + top: 0, + bottom: 0, + backgroundSize: 'cover', + backgroundPosition: 'center 40%' +}) + +export const ImageBackdrop = styled('span')(({ theme }) => ({ + position: 'absolute', + borderRadius: '10px', + left: 0, + right: 0, + top: 0, + bottom: 0, + backgroundColor: theme.palette.common.black, + opacity: 0.1, + transition: theme.transitions.create('opacity') +})) + +export const ImageMarked = styled('span')(() => ({ + height: 25, + width: 25, + backgroundColor: 'transparent', + position: 'absolute', + top: 'auto', + left: 'auto', + opacity: 0 +})) diff --git a/packages/ui/src/ui-component/cards/StarterPromptsCard.css b/packages/ui/src/ui-component/cards/StarterPromptsCard.css index 85c2d415..028b8b34 100644 --- a/packages/ui/src/ui-component/cards/StarterPromptsCard.css +++ b/packages/ui/src/ui-component/cards/StarterPromptsCard.css @@ -1,6 +1,5 @@ .button-container { position: absolute; - bottom: 0; z-index: 1000; display: flex; overflow-x: auto; diff --git a/packages/ui/src/ui-component/cards/StarterPromptsCard.js b/packages/ui/src/ui-component/cards/StarterPromptsCard.js index 3abd8378..cfec4ba4 100644 --- a/packages/ui/src/ui-component/cards/StarterPromptsCard.js +++ b/packages/ui/src/ui-component/cards/StarterPromptsCard.js @@ -3,9 +3,9 @@ import PropTypes from 'prop-types' import { Chip } from '@mui/material' import './StarterPromptsCard.css' -const StarterPromptsCard = ({ isGrid, starterPrompts, onPromptClick }) => { +const StarterPromptsCard = ({ isGrid, starterPrompts, sx, onPromptClick }) => { return ( - + {starterPrompts.map((sp, index) => ( onPromptClick(sp.prompt, e)} /> ))} @@ -15,7 +15,8 @@ const StarterPromptsCard = ({ isGrid, starterPrompts, onPromptClick }) => { StarterPromptsCard.propTypes = { isGrid: PropTypes.bool, - starterPrompts: PropTypes.arrayOf(PropTypes.string), + starterPrompts: PropTypes.array, + sx: 
PropTypes.object, onPromptClick: PropTypes.func } diff --git a/packages/ui/src/views/chatmessage/ChatMessage.css b/packages/ui/src/views/chatmessage/ChatMessage.css index 3b0bb9e3..9e7a1857 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.css +++ b/packages/ui/src/views/chatmessage/ChatMessage.css @@ -146,6 +146,16 @@ align-items: center; } +.preview { + position: absolute; + bottom: 0; + z-index: 1000; + display: flex; + overflow-x: auto; + -webkit-overflow-scrolling: touch; /* For momentum scroll on mobile devices */ + scrollbar-width: none; /* For Firefox */ +} + .file-drop-field { position: relative; /* Needed to position the icon correctly */ /* Other styling for the field */ @@ -162,26 +172,6 @@ flex-direction: column; justify-content: center; align-items: center; - z-index: 10; /* Ensure it's above other content */ + z-index: 2000; /* Ensure it's above other content */ border: 2px dashed #0094ff; /* Example style */ } - -.preview-container { - -} - -.preview-card { - border: 2px solid #E7EDF3; - border-radius: 16%; - transition: 0.4s; -} - -.preview-card&:hover { - border-color: #5B9FED; -} - - -.button { - flex: 0 0 auto; /* Don't grow, don't shrink, base width on content */ - margin: 5px; /* Adjust as needed for spacing between buttons */ -} \ No newline at end of file diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index d9003f72..0d969c5e 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -1,4 +1,4 @@ -import { useCallback, useEffect, useRef, useState } from 'react' +import { useState, useRef, useEffect, useCallback } from 'react' import { useSelector } from 'react-redux' import PropTypes from 'prop-types' import socketIOClient from 'socket.io-client' @@ -8,30 +8,33 @@ import rehypeRaw from 'rehype-raw' import remarkGfm from 'remark-gfm' import remarkMath from 'remark-math' import axios from 'axios' -import audioUploadSVG from 'assets/images/wave-sound.jpg' import { Box, Button, Card, - CardActions, CardMedia, Chip, CircularProgress, Divider, - Grid, IconButton, InputAdornment, OutlinedInput, Typography } from '@mui/material' import { useTheme } from '@mui/material/styles' -import { IconDownload, IconSend, IconMicrophone, IconPhotoPlus, IconCircleDot } from '@tabler/icons' +import { IconDownload, IconSend, IconMicrophone, IconPhotoPlus, IconCircleDot, IconTrash } from '@tabler/icons' +import robotPNG from 'assets/images/robot.png' +import userPNG from 'assets/images/account.png' +import audioUploadSVG from 'assets/images/wave-sound.jpg' // project import import { CodeBlock } from 'ui-component/markdown/CodeBlock' import { MemoizedReactMarkdown } from 'ui-component/markdown/MemoizedReactMarkdown' import SourceDocDialog from 'ui-component/dialog/SourceDocDialog' +import StarterPromptsCard from 'ui-component/cards/StarterPromptsCard' +import { cancelAudioRecording, startAudioRecording, stopAudioRecording } from './audio-recording' +import { ImageButton, ImageSrc, ImageBackdrop, ImageMarked } from 'ui-component/button/ImageButton' import './ChatMessage.css' import './audio-recording.css' @@ -46,12 +49,14 @@ import useApi from 'hooks/useApi' // Const import { baseURL, maxScroll } from 'store/constant' -import robotPNG from 'assets/images/robot.png' -import userPNG from 'assets/images/account.png' -import StarterPromptsCard from '../../ui-component/cards/StarterPromptsCard' +// Utils import { isValidURL, removeDuplicateURL, 
setLocalStorageChatflow } from 'utils/genericHelper' -import DeleteIcon from '@mui/icons-material/Delete' -import { cancelAudioRecording, startAudioRecording, stopAudioRecording } from './audio-recording' + +const messageImageStyle = { + width: '128px', + height: '128px', + objectFit: 'cover' +} export const ChatMessage = ({ open, chatflowid, isDialog }) => { const theme = useTheme() @@ -76,13 +81,13 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { const inputRef = useRef(null) const getChatmessageApi = useApi(chatmessageApi.getInternalChatmessageFromChatflow) const getIsChatflowStreamingApi = useApi(chatflowsApi.getIsChatflowStreaming) + const getAllowChatFlowUploads = useApi(chatflowsApi.getAllowChatflowUploads) const getChatflowConfig = useApi(chatflowsApi.getSpecificChatflow) const [starterPrompts, setStarterPrompts] = useState([]) // drag & drop and file input const fileUploadRef = useRef(null) - const getAllowChatFlowUploads = useApi(chatflowsApi.getAllowChatflowUploads) const [isChatFlowAvailableForUploads, setIsChatFlowAvailableForUploads] = useState(false) const [previews, setPreviews] = useState([]) const [isDragOver, setIsDragOver] = useState(false) @@ -91,20 +96,17 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { const [isRecording, setIsRecording] = useState(false) const [recordingNotSupported, setRecordingNotSupported] = useState(false) - const handleDragOver = (e) => { - if (!isChatFlowAvailableForUploads) { - return - } - e.preventDefault() - } const isFileAllowedForUpload = (file) => { const constraints = getAllowChatFlowUploads.data + /** + * {isUploadAllowed: boolean, uploadFileSizeAndTypes: Array<{ fileTypes: string[], maxUploadSize: number }>} + */ let acceptFile = false - if (constraints.allowUploads) { + if (constraints.isUploadAllowed) { const fileType = file.type const sizeInMB = file.size / 1024 / 1024 - constraints.allowed.map((allowed) => { - if (allowed.allowedTypes.includes(fileType) && sizeInMB <= allowed.maxUploadSize) { + constraints.uploadFileSizeAndTypes.map((allowed) => { + if (allowed.fileTypes.includes(fileType) && sizeInMB <= allowed.maxUploadSize) { acceptFile = true } }) @@ -114,11 +116,13 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } return acceptFile } + const handleDrop = async (e) => { if (!isChatFlowAvailableForUploads) { return } e.preventDefault() + e.stopPropagation() setIsDragOver(false) let files = [] if (e.dataTransfer.files.length > 0) { @@ -156,10 +160,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { const newFiles = await Promise.all(files) setPreviews((prevPreviews) => [...prevPreviews, ...newFiles]) - // if (newFiles.length > 0) { - // document.getElementById('messagelist').style.height = '80%' - // } } + if (e.dataTransfer.items) { for (const item of e.dataTransfer.items) { if (item.kind === 'string' && item.type.match('^text/uri-list')) { @@ -191,6 +193,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } } } + const handleFileChange = async (event) => { const fileObj = event.target.files && event.target.files[0] if (!fileObj) { @@ -247,9 +250,15 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } } + const handleDragOver = (e) => { + e.preventDefault() + e.stopPropagation() + } + const handleDragEnter = (e) => { if (isChatFlowAvailableForUploads) { e.preventDefault() + e.stopPropagation() setIsDragOver(true) } } @@ -257,34 +266,27 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { const 
handleDragLeave = (e) => { if (isChatFlowAvailableForUploads) { e.preventDefault() + e.stopPropagation() if (e.originalEvent?.pageX !== 0 || e.originalEvent?.pageY !== 0) { + setIsDragOver(false) return false } - setIsDragOver(false) // Set the drag over state to false when the drag leaves + setIsDragOver(false) } } + const handleDeletePreview = (itemToDelete) => { if (itemToDelete.type === 'file') { URL.revokeObjectURL(itemToDelete.preview) // Clean up for file } setPreviews(previews.filter((item) => item !== itemToDelete)) } + const handleUploadClick = () => { // 👇️ open file input box on click of another element fileUploadRef.current.click() } - const previewStyle = { - width: '128px', - height: '64px', - objectFit: 'fit' // This makes the image cover the area, cropping it if necessary - } - const messageImageStyle = { - width: '128px', - height: '128px', - objectFit: 'cover' // This makes the image cover the area, cropping it if necessary - } - const clearPreviews = () => { // Revoke the data uris to avoid memory leaks previews.forEach((file) => URL.revokeObjectURL(file.preview)) @@ -295,11 +297,13 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { setIsRecording(true) startAudioRecording(setIsRecording, setRecordingNotSupported) } + const onRecordingCancelled = () => { cancelAudioRecording() setIsRecording(false) setRecordingNotSupported(false) } + const onRecordingStopped = () => { stopAudioRecording(addRecordingToPreviews) setIsRecording(false) @@ -505,7 +509,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { // Get chatflow uploads capability useEffect(() => { if (getAllowChatFlowUploads.data) { - setIsChatFlowAvailableForUploads(getAllowChatFlowUploads.data?.allowUploads ?? false) + setIsChatFlowAvailableForUploads(getAllowChatFlowUploads.data?.isUploadAllowed ?? false) } // eslint-disable-next-line react-hooks/exhaustive-deps }, [getAllowChatFlowUploads.data]) @@ -544,12 +548,18 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { useEffect(() => { let socket if (open && chatflowid) { + // API request getChatmessageApi.request(chatflowid) getIsChatflowStreamingApi.request(chatflowid) getAllowChatFlowUploads.request(chatflowid) getChatflowConfig.request(chatflowid) + + // Scroll to bottom scrollToBottom() + setIsRecording(false) + + // SocketIO socket = socketIOClient(baseURL) socket.on('connect', () => { @@ -584,20 +594,14 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { }, [open, chatflowid]) return ( -
- {isDragOver && getAllowChatFlowUploads.data?.allowUploads && ( + <> + {isDragOver && getAllowChatFlowUploads.data?.isUploadAllowed && ( Drop here to upload - {getAllowChatFlowUploads.data.allowed.map((allowed) => { + {getAllowChatFlowUploads.data.uploadFileSizeAndTypes.map((allowed) => { return ( <> - {allowed.allowedTypes?.join(', ')} + {allowed.fileTypes?.join(', ')} Max Allowed Size: {allowed.maxUploadSize} MB ) @@ -639,7 +643,13 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { )} )} -
+
{messages && messages.map((message, index) => { @@ -687,6 +697,42 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { })}
)} + {message.fileUploads && message.fileUploads.length > 0 && ( +
+ {message.fileUploads.map((item, index) => { + return ( + <> + {item.mime.startsWith('image/') ? ( + + + + ) : ( + // eslint-disable-next-line jsx-a11y/media-has-caption + + )} + + ) + })} +
+ )}
{/* Messages are being rendered in Markdown format */} { })}
)} - {message.fileUploads && - message.fileUploads.map((item, index) => { - return ( - <> - {item.mime.startsWith('image/') ? ( - - - - ) : ( - // eslint-disable-next-line jsx-a11y/media-has-caption - - )} - - ) - })} {message.sourceDocuments && (
{removeDuplicateURL(message).map((source, index) => { @@ -796,55 +818,79 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
{messages && messages.length === 1 && ( - + 0 ? 70 : 0 }} + starterPrompts={starterPrompts || []} + onPromptClick={handlePromptClick} + isGrid={isDialog} + /> )}
- -
+ +
{previews && previews.length > 0 && ( -
-
- - {previews.map((item, index) => ( - <> - {item.mime.startsWith('image/') ? ( - - - - - handleDeletePreview(item)} size='small'> - - - - - - ) : ( - - - - - handleDeletePreview(item)} size='small'> - - - - - - )} - - ))} - -
-
+ + {previews.map((item, index) => ( + <> + {item.mime.startsWith('image/') ? ( + handleDeletePreview(item)} + > + + + + + + + ) : ( + + + handleDeletePreview(item)} size='small'> + + + + )} + + ))} + )} +
+
{ maxRows={isDialog ? 7 : 2} startAdornment={ isChatFlowAvailableForUploads && ( - + { } /> {isChatFlowAvailableForUploads && ( - + )}
setSourceDialogOpen(false)} /> -
+ ) } diff --git a/packages/ui/src/views/chatmessage/audio-recording.js b/packages/ui/src/views/chatmessage/audio-recording.js index f5cba001..1fbaddc1 100644 --- a/packages/ui/src/views/chatmessage/audio-recording.js +++ b/packages/ui/src/views/chatmessage/audio-recording.js @@ -68,30 +68,39 @@ export function startAudioRecording(onRecordingStart, onUnsupportedBrowser) { //Error handling structure switch (error.name) { case 'AbortError': //error from navigator.mediaDevices.getUserMedia + // eslint-disable-next-line no-console console.log('An AbortError has occurred.') break case 'NotAllowedError': //error from navigator.mediaDevices.getUserMedia + // eslint-disable-next-line no-console console.log('A NotAllowedError has occurred. User might have denied permission.') break case 'NotFoundError': //error from navigator.mediaDevices.getUserMedia + // eslint-disable-next-line no-console console.log('A NotFoundError has occurred.') break case 'NotReadableError': //error from navigator.mediaDevices.getUserMedia + // eslint-disable-next-line no-console console.log('A NotReadableError has occurred.') break case 'SecurityError': //error from navigator.mediaDevices.getUserMedia or from the MediaRecorder.start + // eslint-disable-next-line no-console console.log('A SecurityError has occurred.') break case 'TypeError': //error from navigator.mediaDevices.getUserMedia + // eslint-disable-next-line no-console console.log('A TypeError has occurred.') break case 'InvalidStateError': //error from the MediaRecorder.start + // eslint-disable-next-line no-console console.log('An InvalidStateError has occurred.') break case 'UnknownError': //error from the MediaRecorder.start + // eslint-disable-next-line no-console console.log('An UnknownError has occurred.') break default: + // eslint-disable-next-line no-console console.log('An error occurred with the error name ' + error.name) } }) @@ -113,9 +122,11 @@ export function stopAudioRecording(addRecordingToPreviews) { //Error handling structure switch (error.name) { case 'InvalidStateError': //error from the MediaRecorder.stop + // eslint-disable-next-line no-console console.log('An InvalidStateError has occurred.') break default: + // eslint-disable-next-line no-console console.log('An error occurred with the error name ' + error.name) } }) From 8a14a52d9072746d40b377866120a1d68802772a Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 18 Jan 2024 13:03:27 +0530 Subject: [PATCH 11/62] GPT Vision: Renaming to OpenAIMultiModalChain and merging the functionality of Whisper.
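This folds the speech-to-text path into the multi-modal chain itself: when Speech to Text is enabled, stored audio uploads are run through whisper-1 and the resulting text is appended to the user message before the vision request is sent. For reviewers, a minimal sketch of driving the merged chain directly; the wiring below is illustrative (the upload object and chat id are made up), and only the field names come from OpenAIMultiModalChainInput:

    import { VLLMChain } from './VLLMChain'

    const chain = new VLLMChain({
        openAIApiKey: process.env.OPENAI_API_KEY,
        modelName: 'gpt-4-vision-preview',
        imageResolution: 'low',
        // 'transcriptions' keeps the spoken language, 'translations' renders it in English
        speechToTextMode: 'transcriptions',
        uploads: [{ type: 'stored-file', name: 'voice-note.mp3', mime: 'audio/mpeg', data: 'example-chat-id' }]
    })

    // inputKey is 'input' and outputKey is 'text' on the chain
    const { text } = await chain.call({ input: 'Summarise the attached voice note' })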
--- .../OpenAIMultiModalChain.ts} | 101 +++++++++++++----- .../OpenAIMultiModalChain}/VLLMChain.ts | 71 ++++++------ .../OpenAIMultiModalChain}/chain.svg | 0 .../nodes/multimodal/OpenAI/AudioWhisper.ts | 66 ------------ .../nodes/multimodal/OpenAI/audio.svg | 1 - .../nodes/multimodal/OpenAI/list.png | Bin 5002 -> 0 bytes packages/components/src/Interface.ts | 7 ++ packages/server/src/index.ts | 4 +- 8 files changed, 118 insertions(+), 132 deletions(-) rename packages/components/nodes/{multimodal/OpenAI/OpenAIVisionChain.ts => chains/OpenAIMultiModalChain/OpenAIMultiModalChain.ts} (77%) rename packages/components/nodes/{multimodal/OpenAI => chains/OpenAIMultiModalChain}/VLLMChain.ts (71%) rename packages/components/nodes/{multimodal/OpenAI => chains/OpenAIMultiModalChain}/chain.svg (100%) delete mode 100644 packages/components/nodes/multimodal/OpenAI/AudioWhisper.ts delete mode 100644 packages/components/nodes/multimodal/OpenAI/audio.svg delete mode 100644 packages/components/nodes/multimodal/OpenAI/list.png diff --git a/packages/components/nodes/multimodal/OpenAI/OpenAIVisionChain.ts b/packages/components/nodes/chains/OpenAIMultiModalChain/OpenAIMultiModalChain.ts similarity index 77% rename from packages/components/nodes/multimodal/OpenAI/OpenAIVisionChain.ts rename to packages/components/nodes/chains/OpenAIMultiModalChain/OpenAIMultiModalChain.ts index 1ff4f4c9..f62d58bc 100644 --- a/packages/components/nodes/multimodal/OpenAI/OpenAIVisionChain.ts +++ b/packages/components/nodes/chains/OpenAIMultiModalChain/OpenAIMultiModalChain.ts @@ -1,10 +1,17 @@ -import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { + ICommonObject, + INode, + INodeData, + INodeOutputsValue, + INodeParams +} from "../../../src/Interface"; import { getBaseClasses, getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src/utils' -import { OpenAIVisionChainInput, VLLMChain } from './VLLMChain' +import { OpenAIMultiModalChainInput, VLLMChain } from "./VLLMChain"; import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { formatResponse } from '../../outputparsers/OutputParserHelpers' +import { checkInputs, Moderation, streamResponse } from "../../moderation/Moderation"; -class OpenAIVisionChain_Chains implements INode { +class OpenAIMultiModalChain_Chains implements INode { label: string name: string version: number @@ -24,7 +31,7 @@ class OpenAIVisionChain_Chains implements INode { this.version = 1.0 this.type = 'OpenAIMultiModalChain' this.icon = 'chain.svg' - this.category = 'MultiModal' + this.category = 'Chains' this.badge = 'BETA' this.description = 'Chain to query against Image and Audio Input.' 
this.baseClasses = [this.type, ...getBaseClasses(VLLMChain)] @@ -35,18 +42,20 @@ class OpenAIVisionChain_Chains implements INode { credentialNames: ['openAIApi'] } this.inputs = [ - { - label: 'Audio Input', - name: 'audioInput', - type: 'OpenAIWhisper', - optional: true - }, { label: 'Prompt', name: 'prompt', type: 'BasePromptTemplate', optional: true }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true + }, { label: 'Model Name', name: 'modelName', @@ -55,14 +64,38 @@ class OpenAIVisionChain_Chains implements INode { { label: 'gpt-4-vision-preview', name: 'gpt-4-vision-preview' - }, - { - label: 'whisper-1', - name: 'whisper-1' } ], default: 'gpt-4-vision-preview' }, + { + label: 'Speech to Text', + name: 'speechToText', + type: 'boolean', + optional: true, + }, + // TODO: only show when speechToText is true + { + label: 'Speech to Text Method', + description: 'How to turn audio into text', + name: 'speechToTextMode', + type: 'options', + options: [ + { + label: 'Transcriptions', + name: 'transcriptions', + description: 'Transcribe audio into whatever language the audio is in. Default method when Speech to Text is turned on.' + }, + { + label: 'Translations', + name: 'translations', + description: 'Translate and transcribe the audio into english.' + } + ], + optional: false, + default: 'transcriptions', + additionalParams: true + }, { label: 'Image Resolution', description: 'This parameter controls the resolution in which the model views the image.', @@ -76,6 +109,10 @@ class OpenAIVisionChain_Chains implements INode { { label: 'High', name: 'high' + }, + { + label: 'Auto', + name: 'auto' } ], default: 'low', @@ -107,18 +144,11 @@ class OpenAIVisionChain_Chains implements INode { optional: true, additionalParams: true }, - { - label: 'Chain Name', - name: 'chainName', - type: 'string', - placeholder: 'Name Your Chain', - optional: true - }, { label: 'Accepted Upload Types', name: 'allowedUploadTypes', type: 'string', - default: 'image/gif;image/jpeg;image/png;image/webp', + default: 'image/gif;image/jpeg;image/png;image/webp;audio/mpeg;audio/x-wav;audio/mp4', hidden: true }, { @@ -154,19 +184,23 @@ class OpenAIVisionChain_Chains implements INode { const modelName = nodeData.inputs?.modelName as string const maxTokens = nodeData.inputs?.maxTokens as string const topP = nodeData.inputs?.topP as string - const whisperConfig = nodeData.inputs?.audioInput + const speechToText = nodeData.inputs?.speechToText as boolean - const fields: OpenAIVisionChainInput = { + + const fields: OpenAIMultiModalChainInput = { openAIApiKey: openAIApiKey, imageResolution: imageResolution, verbose: process.env.DEBUG === 'true', - imageUrls: options.uploads, + uploads: options.uploads, modelName: modelName } if (temperature) fields.temperature = parseFloat(temperature) if (maxTokens) fields.maxTokens = parseInt(maxTokens, 10) if (topP) fields.topP = parseFloat(topP) - if (whisperConfig) fields.whisperConfig = whisperConfig + if (speechToText) { + const speechToTextMode = nodeData.inputs?.speechToTextMode ?? 'transcriptions' + if (speechToTextMode) fields.speechToTextMode = speechToTextMode + } if (output === this.name) { const chain = new VLLMChain({ @@ -221,6 +255,17 @@ const runPrediction = async ( const isStreaming = options.socketIO && options.socketIOClientId const socketIO = isStreaming ? 
options.socketIO : undefined const socketIOClientId = isStreaming ? options.socketIOClientId : '' + const moderations = nodeData.inputs?.inputModeration as Moderation[] + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the LLM chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + streamResponse(isStreaming, e.message, socketIO, socketIOClientId) + return formatResponse(e.message) + } + } /** * Apply string transformation to reverse converted special chars: @@ -229,7 +274,7 @@ const runPrediction = async ( */ const promptValues = handleEscapeCharacters(promptValuesRaw, true) if (options?.uploads) { - chain.imageUrls = options.uploads + chain.uploads = options.uploads } if (promptValues && inputVariables.length > 0) { let seen: string[] = [] @@ -285,4 +330,4 @@ const runPrediction = async ( } } -module.exports = { nodeClass: OpenAIVisionChain_Chains } +module.exports = { nodeClass: OpenAIMultiModalChain_Chains } diff --git a/packages/components/nodes/multimodal/OpenAI/VLLMChain.ts b/packages/components/nodes/chains/OpenAIMultiModalChain/VLLMChain.ts similarity index 71% rename from packages/components/nodes/multimodal/OpenAI/VLLMChain.ts rename to packages/components/nodes/chains/OpenAIMultiModalChain/VLLMChain.ts index dd44ebb5..2cf2ce95 100644 --- a/packages/components/nodes/multimodal/OpenAI/VLLMChain.ts +++ b/packages/components/nodes/chains/OpenAIMultiModalChain/VLLMChain.ts @@ -1,27 +1,30 @@ -import { OpenAI as OpenAIClient, ClientOptions } from 'openai' +import { OpenAI as OpenAIClient, ClientOptions, OpenAI } from 'openai' import { BaseChain, ChainInputs } from 'langchain/chains' import { ChainValues } from 'langchain/schema' -import { BasePromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts' +import { BasePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts' import path from 'path' import { getUserHome } from '../../../src/utils' import fs from 'fs' +import { ChatCompletionContentPart, ChatCompletionMessageParam } from 'openai/src/resources/chat/completions' +import ChatCompletionCreateParamsNonStreaming = OpenAI.ChatCompletionCreateParamsNonStreaming +import { IFileUpload } from '../../../src' /** * Interface for the input parameters of the OpenAIVisionChain class. */ -export interface OpenAIVisionChainInput extends ChainInputs { +export interface OpenAIMultiModalChainInput extends ChainInputs { openAIApiKey?: string openAIOrganization?: string throwError?: boolean prompt?: BasePromptTemplate configuration?: ClientOptions - imageUrls?: [] - imageResolution?: string + uploads?: IFileUpload[] + imageResolution?: 'auto' | 'low' | 'high' temperature?: number modelName?: string maxTokens?: number topP?: number - whisperConfig?: any + speechToTextMode?: string } /** @@ -29,7 +32,7 @@ export interface OpenAIVisionChainInput extends ChainInputs { * Vision API. It extends the BaseChain class and implements the * OpenAIVisionChainInput interface. 
*/ -export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { +export class VLLMChain extends BaseChain implements OpenAIMultiModalChainInput { static lc_name() { return 'VLLMChain' } @@ -37,8 +40,8 @@ inputKey = 'input' outputKey = 'text' - imageUrls?: [] - imageResolution: string = 'low' + uploads?: IFileUpload[] + imageResolution: 'auto' | 'low' | 'high' openAIApiKey?: string openAIOrganization?: string clientConfig: ClientOptions @@ -49,9 +52,9 @@ maxTokens?: number topP?: number - whisperConfig?: any + speechToTextMode?: string - constructor(fields: OpenAIVisionChainInput) { + constructor(fields: OpenAIMultiModalChainInput) { super(fields) this.throwError = fields?.throwError ?? false this.imageResolution = fields?.imageResolution ?? 'low' @@ -61,8 +64,8 @@ this.modelName = fields?.modelName this.maxTokens = fields?.maxTokens this.topP = fields?.topP - this.imageUrls = fields?.imageUrls ?? [] - this.whisperConfig = fields?.whisperConfig ?? {} + this.uploads = fields?.uploads ?? [] + this.speechToTextMode = fields?.speechToTextMode if (!this.openAIApiKey) { throw new Error('OpenAI API key not found') } @@ -81,8 +84,8 @@ async _call(values: ChainValues): Promise<ChainValues> { const userInput = values[this.inputKey] - const vRequest: any = { - model: this.modelName, + const vRequest: ChatCompletionCreateParamsNonStreaming = { + model: this.modelName ?? 'gpt-4-vision-preview', temperature: this.temperature, top_p: this.topP, messages: [] @@ -90,42 +93,42 @@ if (this.maxTokens) vRequest.max_tokens = this.maxTokens else vRequest.max_tokens = 1024 - const userRole: any = { role: 'user' } - userRole.content = [] - userRole.content.push({ + const chatMessages: ChatCompletionContentPart[] = [] + const userRole: ChatCompletionMessageParam = { role: 'user', content: [] } + chatMessages.push({ type: 'text', text: userInput }) - if (this.whisperConfig && this.imageUrls && this.imageUrls.length > 0) { - const audioUploads = this.getAudioUploads(this.imageUrls) + if (this.speechToTextMode && this.uploads && this.uploads.length > 0) { + const audioUploads = this.getAudioUploads(this.uploads) for (const url of audioUploads) { const filePath = path.join(getUserHome(), '.flowise', 'gptvision', url.data, url.name) // the audio is stored on the server, so read the file from disk and stream it to Whisper const audio_file = fs.createReadStream(filePath) - if (this.whisperConfig.purpose === 'transcription') { + if (this.speechToTextMode === 'transcriptions') { const transcription = await this.client.audio.transcriptions.create({ file: audio_file, model: 'whisper-1' }) - userRole.content.push({ + chatMessages.push({ type: 'text', text: transcription.text }) - } else if (this.whisperConfig.purpose === 'translation') { + } else if (this.speechToTextMode === 'translations') { const translation = await this.client.audio.translations.create({ file: audio_file, model: 'whisper-1' }) - userRole.content.push({ + chatMessages.push({ type: 'text', text: translation.text }) } } } - if (this.imageUrls && this.imageUrls.length > 0) { - const imageUploads = this.getImageUploads(this.imageUrls) + if (this.uploads && this.uploads.length > 0) { +
const imageUploads = this.getImageUploads(this.uploads) for (const url of imageUploads) { let bf = url.data if (url.type == 'stored-file') { @@ -135,7 +138,7 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { const contents = fs.readFileSync(filePath) bf = 'data:' + url.mime + ';base64,' + contents.toString('base64') } - userRole.content.push({ + chatMessages.push({ type: 'image_url', image_url: { url: bf, @@ -144,6 +147,7 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { }) } } + userRole.content = chatMessages vRequest.messages.push(userRole) if (this.prompt && this.prompt instanceof ChatPromptTemplate) { let chatPrompt = this.prompt as ChatPromptTemplate @@ -151,12 +155,12 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { if (message instanceof SystemMessagePromptTemplate) { vRequest.messages.push({ role: 'system', - content: [ - { - type: 'text', - text: (message.prompt as any).template - } - ] + content: (message.prompt as any).template + }) + } else if (message instanceof HumanMessagePromptTemplate) { + vRequest.messages.push({ + role: 'user', + content: (message.prompt as any).template }) } }) @@ -164,7 +168,6 @@ export class VLLMChain extends BaseChain implements OpenAIVisionChainInput { let response try { - // @ts-ignore response = await this.client.chat.completions.create(vRequest) } catch (error) { if (error instanceof Error) { diff --git a/packages/components/nodes/multimodal/OpenAI/chain.svg b/packages/components/nodes/chains/OpenAIMultiModalChain/chain.svg similarity index 100% rename from packages/components/nodes/multimodal/OpenAI/chain.svg rename to packages/components/nodes/chains/OpenAIMultiModalChain/chain.svg diff --git a/packages/components/nodes/multimodal/OpenAI/AudioWhisper.ts b/packages/components/nodes/multimodal/OpenAI/AudioWhisper.ts deleted file mode 100644 index aa2c71e1..00000000 --- a/packages/components/nodes/multimodal/OpenAI/AudioWhisper.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { INode, INodeData, INodeParams } from '../../../src' - -class OpenAIAudioWhisper implements INode { - label: string - name: string - version: number - description: string - type: string - icon: string - badge: string - category: string - baseClasses: string[] - inputs: INodeParams[] - - constructor() { - this.label = 'Open AI Whisper' - this.name = 'openAIAudioWhisper' - this.version = 1.0 - this.type = 'OpenAIWhisper' - this.description = 'Speech to text using OpenAI Whisper API' - this.icon = 'audio.svg' - this.badge = 'BETA' - this.category = 'MultiModal' - this.baseClasses = [this.type] - this.inputs = [ - { - label: 'Purpose', - name: 'purpose', - type: 'options', - options: [ - { - label: 'Transcription', - name: 'transcription' - }, - { - label: 'Translation', - name: 'translation' - } - ], - default: 'transcription' - }, - { - label: 'Accepted Upload Types', - name: 'allowedUploadTypes', - type: 'string', - default: 'audio/mpeg;audio/x-wav;audio/mp4', - hidden: true - }, - { - label: 'Maximum Upload Size (MB)', - name: 'maxUploadSize', - type: 'number', - default: '5', - hidden: true - } - ] - } - - async init(nodeData: INodeData): Promise { - const purpose = nodeData.inputs?.purpose as string - - return { purpose } - } -} - -module.exports = { nodeClass: OpenAIAudioWhisper } diff --git a/packages/components/nodes/multimodal/OpenAI/audio.svg b/packages/components/nodes/multimodal/OpenAI/audio.svg deleted file mode 100644 index 3bcbbdcd..00000000 --- 
a/packages/components/nodes/multimodal/OpenAI/audio.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/packages/components/nodes/multimodal/OpenAI/list.png deleted file mode 100644 index acb4e5d68f200207a97e10ee63125eb4e040fcec..0000000000000000000000000000000000000000 GIT binary patch (base85-encoded payload of the deleted 5002-byte list.png omitted) diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts abstract clearChatMessages(overrideSessionId?: string): Promise } + +export interface IFileUpload { + data: string + type: string + name: string + mime: string +} \ No newline at end of file diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 4451b838..61aff470 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1695,9 +1695,7 @@ export class App { if (!endingNodeData) return res.status(500).send(`Ending node ${endingNode.id} data not found`) if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') { - if (endingNodeData.type !== 'OpenAIMultiModalChain') { - return res.status(500).send(`Ending node must be either a Chain or Agent`) - } + return res.status(500).send(`Ending node must be either a Chain or Agent`) } if ( From 188311187a13429a4c92d5059edd957854d02587 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 18 Jan 2024 13:04:25 +0530 Subject: [PATCH 12/62] GPT Vision: Fix for error when only speech input is sent. --- packages/server/src/index.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 61aff470..7f1b9414 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1606,7 +1606,7 @@ export class App { if (incomingInput.uploads) { // @ts-ignore ;(incomingInput.uploads as any[]).forEach((upload: any) => { - if (upload.type === 'file') { + if (upload.type === 'file' || upload.type === 'audio') { const filename = upload.name const dir = path.join(getUserHome(), '.flowise', 'gptvision', chatId) if (!fs.existsSync(dir)) { @@ -1615,7 +1615,7 @@ const filePath = path.join(dir, filename) const splitDataURI = upload.data.split(',') const bf = Buffer.from(splitDataURI.pop() || '', 'base64') - //TODO: check if file exists, what should we do if it exists? + //writes data to a file, replacing the file if it already exists. fs.writeFileSync(filePath, bf) // don't need to store the file contents in chatmessage, just the filename and chatId upload.data = chatId From 9222aafc6f23f80ec40323c11ed070706a7daa87 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 18 Jan 2024 17:04:49 +0530 Subject: [PATCH 13/62] GPT Vision: Updated behaviour to submit voice recording directly without the need to do another submit.
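When the only upload on a request is a freshly recorded audio clip and the text box is empty, the chain now transcribes the clip first and uses the transcript as the user's question, so no second submit is needed. The raw audio itself is never attached to the model request; only its transcription is. Roughly, the new routing in runPrediction (a simplified sketch, not the literal patch; transcribe stands in for the Whisper helper below):

    interface FileUpload {
        data: string
        type: string
        name: string
        mime: string
    }

    async function routeUploads(
        input: string,
        uploads: FileUpload[],
        speechToText: boolean,
        transcribe: (upload: FileUpload) => Promise<string>
    ): Promise<{ input: string; attachments: FileUpload[] }> {
        if (uploads.length === 1 && input.length === 0) {
            // a lone recording with no accompanying text: use its transcription as the
            // prompt when speech to text is enabled, and never forward the raw audio
            return { input: speechToText ? await transcribe(uploads[0]) : input, attachments: [] }
        }
        return { input, attachments: uploads }
    }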
--- .../OpenAIMultiModalChain.ts | 37 ++++++---- .../chains/OpenAIMultiModalChain/VLLMChain.ts | 70 +++++++++++-------- .../ui/src/views/chatmessage/ChatMessage.js | 9 ++- 3 files changed, 69 insertions(+), 47 deletions(-) diff --git a/packages/components/nodes/chains/OpenAIMultiModalChain/OpenAIMultiModalChain.ts b/packages/components/nodes/chains/OpenAIMultiModalChain/OpenAIMultiModalChain.ts index f62d58bc..a3f7e815 100644 --- a/packages/components/nodes/chains/OpenAIMultiModalChain/OpenAIMultiModalChain.ts +++ b/packages/components/nodes/chains/OpenAIMultiModalChain/OpenAIMultiModalChain.ts @@ -1,15 +1,9 @@ -import { - ICommonObject, - INode, - INodeData, - INodeOutputsValue, - INodeParams -} from "../../../src/Interface"; +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src/utils' -import { OpenAIMultiModalChainInput, VLLMChain } from "./VLLMChain"; +import { OpenAIMultiModalChainInput, VLLMChain } from './VLLMChain' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { formatResponse } from '../../outputparsers/OutputParserHelpers' -import { checkInputs, Moderation, streamResponse } from "../../moderation/Moderation"; +import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' class OpenAIMultiModalChain_Chains implements INode { label: string @@ -72,7 +66,7 @@ class OpenAIMultiModalChain_Chains implements INode { label: 'Speech to Text', name: 'speechToText', type: 'boolean', - optional: true, + optional: true }, // TODO: only show when speechToText is true { @@ -84,7 +78,8 @@ class OpenAIMultiModalChain_Chains implements INode { { label: 'Transcriptions', name: 'transcriptions', - description: 'Transcribe audio into whatever language the audio is in. Default method when Speech to Text is turned on.' + description: + 'Transcribe audio into whatever language the audio is in. Default method when Speech to Text is turned on.' }, { label: 'Translations', @@ -186,7 +181,6 @@ class OpenAIMultiModalChain_Chains implements INode { const topP = nodeData.inputs?.topP as string const speechToText = nodeData.inputs?.speechToText as boolean - const fields: OpenAIMultiModalChainInput = { openAIApiKey: openAIApiKey, imageResolution: imageResolution, @@ -256,6 +250,22 @@ const runPrediction = async ( const socketIO = isStreaming ? options.socketIO : undefined const socketIOClientId = isStreaming ? options.socketIOClientId : '' const moderations = nodeData.inputs?.inputModeration as Moderation[] + const speechToText = nodeData.inputs?.speechToText as boolean + + if (options?.uploads) { + if (options.uploads.length === 1 && input.length === 0) { + if (speechToText) { + // special case: the text input is empty, but we have an upload (recorded audio) + const convertedText = await chain.processAudioWithWhisper(options.uploads[0], undefined) + // so we use the transcription of the upload as the input + input = convertedText + } + // do not send the audio file to the model + } else { + chain.uploads = options.uploads + } + } + if (moderations && moderations.length > 0) { try { // Use the output of the moderation chain as input for the LLM chain @@ -273,9 +283,6 @@ const runPrediction = async ( * TO: { "value": "hello i am ben\n\n\thow are you?"
} */ const promptValues = handleEscapeCharacters(promptValuesRaw, true) - if (options?.uploads) { - chain.uploads = options.uploads - } if (promptValues && inputVariables.length > 0) { let seen: string[] = [] diff --git a/packages/components/nodes/chains/OpenAIMultiModalChain/VLLMChain.ts b/packages/components/nodes/chains/OpenAIMultiModalChain/VLLMChain.ts index 2cf2ce95..5fcb6252 100644 --- a/packages/components/nodes/chains/OpenAIMultiModalChain/VLLMChain.ts +++ b/packages/components/nodes/chains/OpenAIMultiModalChain/VLLMChain.ts @@ -101,42 +101,20 @@ export class VLLMChain extends BaseChain implements OpenAIMultiModalChainInput { }) if (this.speechToTextMode && this.uploads && this.uploads.length > 0) { const audioUploads = this.getAudioUploads(this.uploads) - for (const url of audioUploads) { - const filePath = path.join(getUserHome(), '.flowise', 'gptvision', url.data, url.name) - - // as the image is stored in the server, read the file and convert it to base64 - const audio_file = fs.createReadStream(filePath) - if (this.speechToTextMode.purpose === 'transcriptions') { - const transcription = await this.client.audio.transcriptions.create({ - file: audio_file, - model: 'whisper-1' - }) - chatMessages.push({ - type: 'text', - text: transcription.text - }) - } else if (this.speechToTextMode.purpose === 'translations') { - const translation = await this.client.audio.translations.create({ - file: audio_file, - model: 'whisper-1' - }) - chatMessages.push({ - type: 'text', - text: translation.text - }) - } + for (const upload of audioUploads) { + await this.processAudioWithWhisper(upload, chatMessages) } } if (this.uploads && this.uploads.length > 0) { const imageUploads = this.getImageUploads(this.uploads) - for (const url of imageUploads) { - let bf = url.data - if (url.type == 'stored-file') { - const filePath = path.join(getUserHome(), '.flowise', 'gptvision', url.data, url.name) + for (const upload of imageUploads) { + let bf = upload.data + if (upload.type == 'stored-file') { + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name) // as the image is stored in the server, read the file and convert it to base64 const contents = fs.readFileSync(filePath) - bf = 'data:' + url.mime + ';base64,' + contents.toString('base64') + bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64') } chatMessages.push({ type: 'image_url', @@ -182,6 +160,40 @@ export class VLLMChain extends BaseChain implements OpenAIMultiModalChainInput { } } + public async processAudioWithWhisper(upload: IFileUpload, chatMessages: ChatCompletionContentPart[] | undefined): Promise<string> { + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name) + + // the audio is stored on the server, so read it back as a file stream for the Whisper API + const audio_file = fs.createReadStream(filePath) + if (this.speechToTextMode === 'transcriptions') { + const transcription = await this.client.audio.transcriptions.create({ + file: audio_file, + model: 'whisper-1' + }) + if (chatMessages) { + chatMessages.push({ + type: 'text', + text: transcription.text + }) + } + return transcription.text + } else if (this.speechToTextMode === 'translations') { + const translation = await this.client.audio.translations.create({ + file: audio_file, + model: 'whisper-1' + }) + if (chatMessages) { + chatMessages.push({ + type: 'text', + text: translation.text + }) + } + return translation.text + } + // should never get here + return '' + } + getAudioUploads = (urls: any[]) => { return
urls.filter((url: any) => url.mime.startsWith('audio/')) } diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 0d969c5e..82b17ded 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -304,10 +304,11 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { setRecordingNotSupported(false) } - const onRecordingStopped = () => { + const onRecordingStopped = async () => { stopAudioRecording(addRecordingToPreviews) setIsRecording(false) setRecordingNotSupported(false) + handlePromptClick('') } const onSourceDialogClick = (data, title) => { @@ -366,7 +367,9 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { if (e) e.preventDefault() if (!promptStarterInput && userInput.trim() === '') { - return + if (!(previews.length === 1 && previews[0].type === 'audio')) { + return + } } let input = userInput @@ -626,7 +629,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
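On the UI side the same special case needs a matching guard: handlePromptClick normally rejects an empty prompt, and must now let one through when the sole pending preview is a recorded audio clip. A minimal sketch of that guard in TypeScript (shouldSubmit is a hypothetical name; the patch above inlines the check):

    function shouldSubmit(userInput: string, previews: Array<{ type: string }>): boolean {
        if (userInput.trim() !== '') return true
        // allow an empty prompt only when the single attachment is a recording
        return previews.length === 1 && previews[0].type === 'audio'
    }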
From f87d84997c0c948bc4572a225f248101e43c9b63 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 18 Jan 2024 17:11:43 +0530 Subject: [PATCH 14/62] GPT Vision: lint fixes --- packages/components/src/Interface.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index e7f6fe86..c098fdd8 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -240,4 +240,4 @@ export interface IFileUpload { type: string name: string mime: string -} \ No newline at end of file +} From e774bd3c12a98bff6066dcf155da629ccb50b0c4 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Fri, 19 Jan 2024 18:02:05 +0530 Subject: [PATCH 15/62] GPT Vision: Added multi model capabilities to ChatOpenAI and ConversationChain. --- .../ConversationChain/ConversationChain.ts | 35 ++++++-- .../nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts | 89 ++++++++++++++++++- packages/components/src/MultiModalUtils.ts | 87 ++++++++++++++++++ packages/server/src/index.ts | 43 +++++---- .../ui/src/views/chatmessage/ChatMessage.js | 4 +- 5 files changed, 229 insertions(+), 29 deletions(-) create mode 100644 packages/components/src/MultiModalUtils.ts diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index fcd9921e..0bba9b3c 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -1,4 +1,4 @@ -import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' +import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ConversationChain } from 'langchain/chains' import { getBaseClasses } from '../../../src/utils' import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts' @@ -8,6 +8,8 @@ import { flatten } from 'lodash' import { Document } from 'langchain/document' import { RunnableSequence } from 'langchain/schema/runnable' import { StringOutputParser } from 'langchain/schema/output_parser' +import { addImagesToMessages, processSpeechToText } from '../../../src/MultiModalUtils' +import { HumanMessage } from 'langchain/schema' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' @@ -67,13 +69,15 @@ class ConversationChain_Chains implements INode { } async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { - const chain = prepareChain(nodeData, this.sessionId, options.chatHistory) + const chain = prepareChain(nodeData, options, this.sessionId) return chain } async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory - const chain = prepareChain(nodeData, this.sessionId, options.chatHistory) + input = await processSpeechToText(nodeData, input, options) + + const chain = prepareChain(nodeData, options, this.sessionId) const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) @@ -105,7 +109,7 @@ } } -const prepareChatPrompt = (nodeData: INodeData) => { +const prepareChatPrompt = (nodeData: INodeData, options: ICommonObject) => { const memory = nodeData.inputs?.memory as FlowiseMemory const prompt = nodeData.inputs?.systemMessagePrompt as string const docs = nodeData.inputs?.document as Document[] @@ -128,16 +132,31 @@ if (finalText) systemMessage = `${systemMessage}\nThe AI has the following context:\n${finalText}` - const chatPrompt = ChatPromptTemplate.fromMessages([ + // TODO: add audio uploads + // if (options.uploads.length > 0) { + // const audioUploads = getAudioUploads(options.uploads) + // for (const upload of audioUploads) { + // await this.processAudioWithWhisper(upload, chatMessages) + // } + // } + const imageContent = addImagesToMessages(nodeData, options) + + // TODO: promptMessages should not be any[]; define a proper prompt message interface + let promptMessages: any[] = [ SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage), new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`) - ]) + ] + if (imageContent.length > 0) { + promptMessages.push(new HumanMessage({ content: imageContent })) + } + const chatPrompt = ChatPromptTemplate.fromMessages(promptMessages) return chatPrompt } -const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMessage[] = []) => { +const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => { + const chatHistory = options.chatHistory const model = nodeData.inputs?.model as BaseChatModel const memory = nodeData.inputs?.memory as FlowiseMemory const memoryKey = memory.memoryKey ??
'chat_history' @@ -150,7 +169,7 @@ const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMes return history } }, - prepareChatPrompt(nodeData), + prepareChatPrompt(nodeData, options), model, new StringOutputParser() ]) diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts index 49326163..bc5814d0 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts @@ -19,7 +19,7 @@ class ChatOpenAI_ChatModels implements INode { constructor() { this.label = 'ChatOpenAI' this.name = 'chatOpenAI' - this.version = 2.0 + this.version = 3.0 this.type = 'ChatOpenAI' this.icon = 'openai.svg' this.category = 'Chat Models' @@ -152,6 +152,73 @@ class ChatOpenAI_ChatModels implements INode { type: 'json', optional: true, additionalParams: true + }, + { + label: 'Allow Image Uploads', + name: 'allowImageUploads', + type: 'boolean', + default: false, + optional: true + }, + { + label: 'Allow Audio Uploads', + name: 'allowAudioUploads', + type: 'boolean', + default: false, + optional: true + }, + { + label: 'Allow Speech to Text', + name: 'allowSpeechToText', + type: 'boolean', + default: false, + optional: true + }, + // TODO: only show when speechToText is true + { + label: 'Speech to Text Method', + description: 'How to turn audio into text', + name: 'speechToTextMode', + type: 'options', + options: [ + { + label: 'Transcriptions', + name: 'transcriptions', + description: + 'Transcribe audio into whatever language the audio is in. Default method when Speech to Text is turned on.' + }, + { + label: 'Translations', + name: 'translations', + description: 'Translate and transcribe the audio into english.' + } + ], + optional: false, + default: 'transcriptions', + additionalParams: true + }, + { + label: 'Image Resolution', + description: 'This parameter controls the resolution in which the model views the image.', + name: 'imageResolution', + type: 'options', + options: [ + { + label: 'Low', + name: 'low' + }, + { + label: 'High', + name: 'high' + }, + { + label: 'Auto', + name: 'auto' + } + ], + default: 'low', + optional: false, + additionalParams: true } ] } @@ -168,6 +235,12 @@ class ChatOpenAI_ChatModels implements INode { const basePath = nodeData.inputs?.basepath as string const baseOptions = nodeData.inputs?.baseOptions + const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean + const allowAudioUploads = nodeData.inputs?.allowAudioUploads as boolean + const allowSpeechToText = nodeData.inputs?.allowSpeechToText as boolean + const speechToTextMode = nodeData.inputs?.speechToTextMode as string + const imageResolution = nodeData.inputs?.imageResolution as string + const credentialData = await getCredentialData(nodeData.credential ?? '', options) const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) @@ -200,6 +273,20 @@ class ChatOpenAI_ChatModels implements INode { basePath, baseOptions: parsedBaseOptions }) + + const multiModal = { + allowImageUploads: allowImageUploads ?? false, + allowAudioUploads: allowAudioUploads ?? false, + allowSpeechToText: allowSpeechToText ?? 
false, + imageResolution, + speechToTextMode + } + Object.defineProperty(model, 'multiModal', { + enumerable: true, + configurable: true, + writable: true, + value: multiModal + }) return model } } diff --git a/packages/components/src/MultiModalUtils.ts b/packages/components/src/MultiModalUtils.ts new file mode 100644 index 00000000..513915a5 --- /dev/null +++ b/packages/components/src/MultiModalUtils.ts @@ -0,0 +1,87 @@ +import { ICommonObject, INodeData } from './Interface' +import { BaseChatModel } from 'langchain/chat_models/base' +import { type ClientOptions, OpenAIClient } from '@langchain/openai' +import { ChatOpenAI } from 'langchain/chat_models/openai' +import path from 'path' +import { getUserHome } from './utils' +import fs from 'fs' +import { MessageContent } from '@langchain/core/dist/messages' + +export const processSpeechToText = async (nodeData: INodeData, input: string, options: ICommonObject) => { + const MODEL_NAME = 'whisper-1' + + let model = nodeData.inputs?.model as BaseChatModel + if (model instanceof ChatOpenAI && (model as any).multiModal) { + const multiModalConfig = (model as any).multiModal + if (options?.uploads) { + if (options.uploads.length === 1 && input.length === 0 && options.uploads[0].mime === 'audio/webm') { + const upload = options.uploads[0] + // special case: the text input is empty, but we have an upload (recorded audio) + if (multiModalConfig.allowSpeechToText) { + const openAIClientOptions: ClientOptions = { + apiKey: model.openAIApiKey, + organization: model.organization + } + const openAIClient = new OpenAIClient(openAIClientOptions) + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name) + + // the audio is stored on the server, so read it back as a file stream for the Whisper API + const audio_file = fs.createReadStream(filePath) + + if (multiModalConfig.speechToTextMode === 'transcriptions') { + const transcription = await openAIClient.audio.transcriptions.create({ + file: audio_file, + model: MODEL_NAME + }) + return transcription.text + } else if (multiModalConfig.speechToTextMode === 'translations') { + const translation = await openAIClient.audio.translations.create({ + file: audio_file, + model: MODEL_NAME + }) + return translation.text + } + } else { + throw new Error('Speech to text is not selected, but found a recorded audio file. Please fix the chain.') + } + } + } + } + return input +} + +export const addImagesToMessages = (nodeData: INodeData, options: ICommonObject): MessageContent => { + const imageContent: MessageContent = [] + let model = nodeData.inputs?.model as BaseChatModel + if (model instanceof ChatOpenAI && (model as any).multiModal) { + if (options?.uploads && options?.uploads.length > 0) { + const imageUploads = getImageUploads(options.uploads) + for (const upload of imageUploads) { + let bf = upload.data + if (upload.type == 'stored-file') { + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name) + + // as the image is stored on the server, read the file and convert it to base64 + const contents = fs.readFileSync(filePath) + bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64') + } + imageContent.push({ + type: 'image_url', + image_url: { + url: bf, + detail: 'low' + } + }) + } + } + } + return imageContent +} + +export const getAudioUploads = (uploads: any[]) => { + return uploads.filter((url: any) => url.mime.startsWith('audio/')) +} + +export const getImageUploads = (uploads: any[]) => { + return uploads.filter((url: any) => url.mime.startsWith('image/')) +} diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 7f1b9414..da1057a9 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -467,40 +467,45 @@ export class App { }) if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) - const uploadAllowedNodes = ['OpenAIMultiModalChain', 'OpenAIWhisper'] + const uploadAllowedCategoryNodes = ['Chat Models'] try { const flowObj = JSON.parse(chatflow.flowData) - let isUploadAllowed = false const allowances: IUploadFileSizeAndTypes[] = [] - + let allowSpeechToText = false + let allowImageUploads = false + let allowAudioUploads = false flowObj.nodes.forEach((node: IReactFlowNode) => { - if (uploadAllowedNodes.indexOf(node.data.type) > -1) { + if (uploadAllowedCategoryNodes.indexOf(node.data.category) > -1) { logger.debug(`[server]: Found Eligible Node ${node.data.type}, Allowing Uploads.`) - isUploadAllowed = true - - const allowance: IUploadFileSizeAndTypes = { - fileTypes: [], - maxUploadSize: 0 - } + // there could be multiple components allowing uploads, so we check if it's already added + // TODO: for now the maxUploadSize is hardcoded to 5MB, we need to add it to the node properties node.data.inputParams.map((param: INodeParams) => { - if (param.name === 'allowedUploadTypes') { - allowance.fileTypes = (param.default as string).split(';') + if (param.name === 'allowImageUploads' && node.data.inputs?.['allowImageUploads'] && !allowImageUploads) { + allowances.push({ + fileTypes: 'image/gif;image/jpeg;image/png;image/webp'.split(';'), + maxUploadSize: 5 + }) + allowImageUploads = true } - if (param.name === 'maxUploadSize') { - allowance.maxUploadSize = parseInt(param.default ?
(param.default as string) : '0') + if (param.name === 'allowAudioUploads' && node.data.inputs?.['allowAudioUploads'] && !allowAudioUploads) { + allowances.push({ + fileTypes: 'audio/mpeg;audio/x-wav;audio/mp4'.split(';'), + maxUploadSize: 5 + }) + allowAudioUploads = true + } + if (param.name === 'allowSpeechToText' && node.data.inputs?.['allowSpeechToText']) { + allowSpeechToText = true } }) - - if (allowance.fileTypes && allowance.maxUploadSize) { - allowances.push(allowance) - } } }) return res.json({ - isUploadAllowed, + allowSpeechToText: allowSpeechToText, + isUploadAllowed: allowances.length > 0, uploadFileSizeAndTypes: allowances }) } catch (e) { diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 82b17ded..155b3e99 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -74,6 +74,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { ]) const [socketIOClientId, setSocketIOClientId] = useState('') const [isChatFlowAvailableToStream, setIsChatFlowAvailableToStream] = useState(false) + const [isChatFlowAvailableForSpeech, setIsChatFlowAvailableForSpeech] = useState(false) const [sourceDialogOpen, setSourceDialogOpen] = useState(false) const [sourceDialogProps, setSourceDialogProps] = useState({}) const [chatId, setChatId] = useState(undefined) @@ -513,6 +514,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { useEffect(() => { if (getAllowChatFlowUploads.data) { setIsChatFlowAvailableForUploads(getAllowChatFlowUploads.data?.isUploadAllowed ?? false) + setIsChatFlowAvailableForSpeech(getAllowChatFlowUploads.data?.allowSpeechToText ?? false) } // eslint-disable-next-line react-hooks/exhaustive-deps }, [getAllowChatFlowUploads.data]) @@ -922,7 +924,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } endAdornment={ <> - {isChatFlowAvailableForUploads && ( + {isChatFlowAvailableForSpeech && ( onMicrophonePressed()} From 7e5d8e7294cdf7affa9fb6aeee67148d7cde40b0 Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 22 Jan 2024 11:10:27 +0530 Subject: [PATCH 16/62] Fix image uploads appear on top of chat messages. Now image uploads will appear above the text input on its own row. --- .../src/views/chatmessage/ChatExpandDialog.js | 7 +- .../ui/src/views/chatmessage/ChatMessage.css | 28 ++-- .../ui/src/views/chatmessage/ChatMessage.js | 140 +++++++++--------- .../ui/src/views/chatmessage/ChatPopUp.js | 9 +- 4 files changed, 95 insertions(+), 89 deletions(-) diff --git a/packages/ui/src/views/chatmessage/ChatExpandDialog.js b/packages/ui/src/views/chatmessage/ChatExpandDialog.js index 1b2037a8..e2044ea3 100644 --- a/packages/ui/src/views/chatmessage/ChatExpandDialog.js +++ b/packages/ui/src/views/chatmessage/ChatExpandDialog.js @@ -21,7 +21,7 @@ const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel }) => { aria-describedby='alert-dialog-description' sx={{ overflow: 'visible' }} > - +
{dialogProps.title}
@@ -43,7 +43,10 @@ const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel }) => { )}
- + diff --git a/packages/ui/src/views/chatmessage/ChatMessage.css b/packages/ui/src/views/chatmessage/ChatMessage.css index 9e7a1857..c00186bf 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.css +++ b/packages/ui/src/views/chatmessage/ChatMessage.css @@ -1,8 +1,6 @@ .messagelist { width: 100%; - height: 100%; - overflow-y: scroll; - overflow-x: hidden; + height: auto; border-radius: 0.5rem; } @@ -108,32 +106,38 @@ } .center { + width: 100%; display: flex; justify-content: center; align-items: center; position: relative; flex-direction: column; - padding: 10px; + padding: 12px; } -.cloud { +.cloud-wrapper, +.cloud-dialog-wrapper { width: 400px; height: calc(100vh - 260px); - overflow-y: scroll; - border-radius: 0.5rem; display: flex; - justify-content: center; - align-items: center; + align-items: start; + justify-content: start; + flex-direction: column; } +.cloud-dialog-wrapper { + width: 100%; +} + +.cloud, .cloud-dialog { width: 100%; - height: 100vh; + height: auto; + max-height: calc(100% - 72px); overflow-y: scroll; - border-radius: 0.5rem; display: flex; justify-content: center; - align-items: center; + align-items: start; } .cloud-message { diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 155b3e99..db2ed749 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -703,7 +703,14 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
)} {message.fileUploads && message.fileUploads.length > 0 && ( -
+
{message.fileUploads.map((item, index) => { return ( <> @@ -833,9 +840,9 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
-
+
{previews && previews.length > 0 && ( - + {previews.map((item, index) => ( <> {item.mime.startsWith('image/') ? ( @@ -886,85 +893,70 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { ))} )} - -
- -
-
-
- + + + + + + + ) + } + endAdornment={ + <> + {isChatFlowAvailableForUploads && ( + onMicrophonePressed()} type='button' disabled={loading || !chatflowid} - edge='start' + edge='end' > - - ) - } - endAdornment={ - <> - {isChatFlowAvailableForSpeech && ( - - onMicrophonePressed()} - type='button' - disabled={loading || !chatflowid} - edge='end' - > - - - - )} - - - {loading ? ( -
- -
- ) : ( - // Send icon SVG in input field - - )} -
-
- - } - /> - {isChatFlowAvailableForUploads && ( - - )} - -
+ )} + + + {loading ? ( +
+ +
+ ) : ( + // Send icon SVG in input field + + )} +
+
+ + } + /> + {isChatFlowAvailableForUploads && ( + + )} +
setSourceDialogOpen(false)} /> diff --git a/packages/ui/src/views/chatmessage/ChatPopUp.js b/packages/ui/src/views/chatmessage/ChatPopUp.js index 670fb00f..91ba73e2 100644 --- a/packages/ui/src/views/chatmessage/ChatPopUp.js +++ b/packages/ui/src/views/chatmessage/ChatPopUp.js @@ -191,7 +191,14 @@ export const ChatPopUp = ({ chatflowid }) => { - + From 59643b65d9cadb40efdf65c436cfa8e2420ae8e0 Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 22 Jan 2024 15:51:05 +0530 Subject: [PATCH 17/62] Fix the flickering issue when dragging files over the chat window --- .../ui/src/views/chatmessage/ChatMessage.css | 26 +++++++-- .../ui/src/views/chatmessage/ChatMessage.js | 53 ++++++++----------- 2 files changed, 44 insertions(+), 35 deletions(-) diff --git a/packages/ui/src/views/chatmessage/ChatMessage.css b/packages/ui/src/views/chatmessage/ChatMessage.css index c00186bf..6acff1d5 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.css +++ b/packages/ui/src/views/chatmessage/ChatMessage.css @@ -119,16 +119,34 @@ .cloud-dialog-wrapper { width: 400px; height: calc(100vh - 260px); - display: flex; - align-items: start; - justify-content: start; - flex-direction: column; } .cloud-dialog-wrapper { width: 100%; } +.cloud-wrapper > div, +.cloud-dialog-wrapper > div { + width: 100%; + height: 100%; + display: flex; + align-items: start; + justify-content: start; + flex-direction: column; + position: relative; +} + +.image-dropzone { + position: absolute; + width: 100%; + height: 100%; + top: 0; + left: 0; + bottom: 0; + right: 0; + z-index: 2001; /* Ensure it's above other content */ +} + .cloud, .cloud-dialog { width: 100%; diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index db2ed749..c9c69ae0 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -91,7 +91,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { const fileUploadRef = useRef(null) const [isChatFlowAvailableForUploads, setIsChatFlowAvailableForUploads] = useState(false) const [previews, setPreviews] = useState([]) - const [isDragOver, setIsDragOver] = useState(false) + const [isDragActive, setIsDragActive] = useState(false) // recording const [isRecording, setIsRecording] = useState(false) @@ -123,8 +123,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { return } e.preventDefault() - e.stopPropagation() - setIsDragOver(false) + setIsDragActive(false) let files = [] if (e.dataTransfer.files.length > 0) { for (const file of e.dataTransfer.files) { @@ -251,28 +250,17 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } } - const handleDragOver = (e) => { - e.preventDefault() - e.stopPropagation() - } - - const handleDragEnter = (e) => { + const handleDrag = (e) => { if (isChatFlowAvailableForUploads) { e.preventDefault() e.stopPropagation() - setIsDragOver(true) - } - } - - const handleDragLeave = (e) => { - if (isChatFlowAvailableForUploads) { - e.preventDefault() - e.stopPropagation() - if (e.originalEvent?.pageX !== 0 || e.originalEvent?.pageY !== 0) { - setIsDragOver(false) - return false + if (e.type === 'dragenter' || e.type === 'dragover') { + console.log('drag enter') + setIsDragActive(true) + } else if (e.type === 'dragleave') { + console.log('drag leave') + setIsDragActive(false) } - setIsDragOver(false) } } @@ -599,8 +587,17 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { }, [open, chatflowid]) return ( - <> - {isDragOver && 
getAllowChatFlowUploads.data?.isUploadAllowed && ( +
+ {isDragActive && ( +
+ )} + {isDragActive && getAllowChatFlowUploads.data?.isUploadAllowed && ( Drop here to upload {getAllowChatFlowUploads.data.uploadFileSizeAndTypes.map((allowed) => { @@ -648,13 +645,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { )} )} -
+
{messages && messages.map((message, index) => { @@ -959,7 +950,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
setSourceDialogOpen(false)} /> - +
) } From 7d0ae5286c421c13a5d8e05ae789affba4f14032 Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 22 Jan 2024 16:20:18 +0530 Subject: [PATCH 18/62] Fix chat popup styles and remove console statements --- packages/ui/src/views/chatmessage/ChatMessage.css | 1 + packages/ui/src/views/chatmessage/ChatMessage.js | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/ui/src/views/chatmessage/ChatMessage.css b/packages/ui/src/views/chatmessage/ChatMessage.css index 6acff1d5..6742fbac 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.css +++ b/packages/ui/src/views/chatmessage/ChatMessage.css @@ -156,6 +156,7 @@ display: flex; justify-content: center; align-items: start; + flex-grow: 1; } .cloud-message { diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index c9c69ae0..51005a06 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -255,10 +255,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { e.preventDefault() e.stopPropagation() if (e.type === 'dragenter' || e.type === 'dragover') { - console.log('drag enter') setIsDragActive(true) } else if (e.type === 'dragleave') { - console.log('drag leave') setIsDragActive(false) } } From f384ad9086b3862b8ab7cf2c30f36f5f435a2bcf Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 22 Jan 2024 19:03:05 +0530 Subject: [PATCH 19/62] Update audio recording ui in internal chat --- .../ui/src/views/chatmessage/ChatMessage.js | 210 ++++++++++-------- .../src/views/chatmessage/audio-recording.css | 86 +++---- 2 files changed, 145 insertions(+), 151 deletions(-) diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 51005a06..e52bcfd5 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -23,7 +23,7 @@ import { Typography } from '@mui/material' import { useTheme } from '@mui/material/styles' -import { IconDownload, IconSend, IconMicrophone, IconPhotoPlus, IconCircleDot, IconTrash } from '@tabler/icons' +import { IconCircleDot, IconDownload, IconSend, IconMicrophone, IconPhotoPlus, IconSquare, IconTrash, IconX } from '@tabler/icons' import robotPNG from 'assets/images/robot.png' import userPNG from 'assets/images/account.png' import audioUploadSVG from 'assets/images/wave-sound.jpg' @@ -608,41 +608,6 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { })} )} - {isRecording && ( - -
- Recording -
- - - -
- - - -

00:00

-
- - - -
-
- {recordingNotSupported && ( -
-
-

To record audio, use browsers like Chrome and Firefox that support audio recording.

- -
-
- )} -
- )}
{messages && @@ -829,6 +794,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
+ +
{previews && previews.length > 0 && ( @@ -882,70 +849,129 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { ))} )} -
- - - - - - ) - } - endAdornment={ - <> - {isChatFlowAvailableForUploads && ( - + {isRecording ? ( + <> + {recordingNotSupported && ( +
+
+

To record audio, use modern browsers like Chrome or Firefox that support audio recording.

+ +
+
+ )} + +
+ + + +

00:00

+
+
+ + + + + + +
+
+ + ) : ( + + onMicrophonePressed()} + onClick={handleUploadClick} type='button' disabled={loading || !chatflowid} - edge='end' + edge='start' > -
- )} - - - {loading ? ( -
- -
- ) : ( - // Send icon SVG in input field - - )} -
-
- - } - /> - {isChatFlowAvailableForUploads && ( - - )} - + ) + } + endAdornment={ + <> + {isChatFlowAvailableForUploads && ( + + onMicrophonePressed()} + type='button' + disabled={loading || !chatflowid} + edge='end' + > + + + + )} + + + {loading ? ( +
+ +
+ ) : ( + // Send icon SVG in input field + + )} +
+
+ + } + /> + {isChatFlowAvailableForUploads && ( + + )} + + )}
setSourceDialogOpen(false)} />
diff --git a/packages/ui/src/views/chatmessage/audio-recording.css b/packages/ui/src/views/chatmessage/audio-recording.css index fbca2f60..4b8e5566 100644 --- a/packages/ui/src/views/chatmessage/audio-recording.css +++ b/packages/ui/src/views/chatmessage/audio-recording.css @@ -8,20 +8,6 @@ * { box-sizing: border-box; } - .audio-recording-container { - width: 100%; - /* view port height*/ - /*targeting Chrome & Safari*/ - display: -webkit-flex; - /*targeting IE10*/ - display: -ms-flex; - display: flex; - flex-direction: column; - justify-content: center; - /*horizontal centering*/ - align-items: center; - background-color: white; - } .start-recording-button { font-size: 70px; color: #435f7a; @@ -36,34 +22,13 @@ /*targeting IE10*/ display: -ms-flex; display: flex; - justify-content: space-evenly; + justify-content: center; /*horizontal centering*/ align-items: center; - width: 334px; - margin-bottom: 30px; - background-color: white; - } - .cancel-recording-button, - .stop-recording-button { - font-size: 70px; - cursor: pointer; - } - .cancel-recording-button { - color: red; - opacity: 0.7; - } - .cancel-recording-button:hover { - color: rgb(206, 4, 4); - } - .stop-recording-button { - color: #33cc33; - opacity: 0.7; - } - .stop-recording-button:hover { - color: #27a527; + gap: 12px; } .recording-elapsed-time { - font-size: 32px; + font-size: 16px; /*targeting Chrome & Safari*/ display: -webkit-flex; /*targeting IE10*/ @@ -73,6 +38,15 @@ /*horizontal centering*/ align-items: center; } + .recording-elapsed-time #elapsed-time { + margin: 0; + } + .recording-indicator-wrapper { + position: relative; + display: flex; + width: 16px; + height: 16px; + } .red-recording-dot { font-size: 25px; color: red; @@ -136,17 +110,11 @@ opacity: 1; } } - .elapsed-time { - font-size: 32px; - } .recording-control-buttons-container.hide { display: none; } .overlay { - position: absolute; - top: 0; width: 100%; - background-color: rgba(82, 76, 76, 0.35); /*targeting Chrome & Safari*/ display: -webkit-flex; /*targeting IE10*/ @@ -155,6 +123,7 @@ justify-content: center; /*horizontal centering*/ align-items: center; + margin-bottom: 12px; } .overlay.hide { display: none; @@ -165,16 +134,15 @@ /*targeting IE10*/ display: -ms-flex; display: flex; - flex-direction: column; justify-content: space-between; /*horizontal centering*/ align-items: center; - width: 317px; - height: 119px; - background-color: white; - border-radius: 10px; - padding: 15px; + width: 100%; font-size: 16px; + gap: 12px; + } + .browser-not-supporting-audio-recording-box > p { + margin: 0; } .close-browser-not-supported-box { cursor: pointer; @@ -219,16 +187,16 @@ -o-animation-iteration-count: infinite; } .text-indication-of-audio-playing span:nth-child(2) { - animation-delay: .4s; - -webkit-animation-delay: .4s; - -moz-animation-delay: .4s; - -o-animation-delay: .4s; + animation-delay: 0.4s; + -webkit-animation-delay: 0.4s; + -moz-animation-delay: 0.4s; + -o-animation-delay: 0.4s; } .text-indication-of-audio-playing span:nth-child(3) { - animation-delay: .8s; - -webkit-animation-delay: .8s; - -moz-animation-delay: .8s; - -o-animation-delay: .8s; + animation-delay: 0.8s; + -webkit-animation-delay: 0.8s; + -moz-animation-delay: 0.8s; + -o-animation-delay: 0.8s; } /* The animation code */ @keyframes blinking-dot { @@ -278,4 +246,4 @@ opacity: 0; } } -} \ No newline at end of file +} From 318686e622fc3633cc8714aa49804faa14f98ad7 Mon Sep 17 00:00:00 2001 From: Ilango Date: Tue, 23 Jan 2024 11:03:38 +0530 Subject: [PATCH 20/62] Fix issue where audio 
recording is not sent on stopping recording --- .../ui/src/views/chatmessage/ChatMessage.js | 359 +++++++++--------- 1 file changed, 183 insertions(+), 176 deletions(-) diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index e52bcfd5..006e2425 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -1,4 +1,4 @@ -import { useState, useRef, useEffect, useCallback } from 'react' +import { useState, useRef, useEffect, useCallback, Fragment } from 'react' import { useSelector } from 'react-redux' import PropTypes from 'prop-types' import socketIOClient from 'socket.io-client' @@ -96,6 +96,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { // recording const [isRecording, setIsRecording] = useState(false) const [recordingNotSupported, setRecordingNotSupported] = useState(false) + const [isLoadingRecording, setIsLoadingRecording] = useState(false) const isFileAllowedForUpload = (file) => { const constraints = getAllowChatFlowUploads.data @@ -292,10 +293,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } const onRecordingStopped = async () => { + setIsLoadingRecording(true) stopAudioRecording(addRecordingToPreviews) - setIsRecording(false) - setRecordingNotSupported(false) - handlePromptClick('') } const onSourceDialogClick = (data, title) => { @@ -364,14 +363,13 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { if (promptStarterInput !== undefined && promptStarterInput.trim() !== '') input = promptStarterInput setLoading(true) - const urls = [] - previews.map((item) => { - urls.push({ + const urls = previews.map((item) => { + return { data: item.data, type: item.type, name: item.name, mime: item.mime - }) + } }) clearPreviews() setMessages((prevMessages) => [...prevMessages, { message: input, type: 'userMessage', fileUploads: urls }]) @@ -383,7 +381,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { history: messages.filter((msg) => msg.message !== 'Hi there! How can I help?'), chatId } - if (urls) params.uploads = urls + if (urls && urls.length > 0) params.uploads = urls if (isChatFlowAvailableToStream) params.socketIOClientId = socketIOClientId const response = await predictionApi.sendMessageAndGetPrediction(chatflowid, params) @@ -584,6 +582,16 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { // eslint-disable-next-line react-hooks/exhaustive-deps }, [open, chatflowid]) + useEffect(() => { + // wait for audio recording to load and then send + if (previews.length === 1 && previews[0].type === 'audio') { + setIsRecording(false) + setRecordingNotSupported(false) + handlePromptClick('') + } + // eslint-disable-next-line + }, [previews]) + return (
{isDragActive && ( @@ -614,169 +622,167 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { messages.map((message, index) => { return ( // The latest message sent by the user will be animated while waiting for a response - <> - - {/* Display the correct icon depending on the message type */} - {message.type === 'apiMessage' ? ( - AI - ) : ( - Me - )} -
- {message.usedTools && ( -
- {message.usedTools.map((tool, index) => { - return ( - onSourceDialogClick(tool, 'Used Tools')} - /> - ) - })} -
- )} - {message.fileUploads && message.fileUploads.length > 0 && ( -
- {message.fileUploads.map((item, index) => { - return ( - <> - {item.mime.startsWith('image/') ? ( - - - - ) : ( - // eslint-disable-next-line jsx-a11y/media-has-caption - - )} - - ) - })} -
- )} -
- {/* Messages are being rendered in Markdown format */} - - ) : ( - - {children} - - ) - } - }} - > - {message.message} - + + {/* Display the correct icon depending on the message type */} + {message.type === 'apiMessage' ? ( + AI + ) : ( + Me + )} +
+ {message.usedTools && ( +
+ {message.usedTools.map((tool, index) => { + return ( + onSourceDialogClick(tool, 'Used Tools')} + /> + ) + })}
- {message.fileAnnotations && ( -
- {message.fileAnnotations.map((fileAnnotation, index) => { - return ( - - ) - })} -
- )} - {message.sourceDocuments && ( -
- {removeDuplicateURL(message).map((source, index) => { - const URL = - source.metadata && source.metadata.source - ? isValidURL(source.metadata.source) - : undefined - return ( - - URL ? onURLClick(source.metadata.source) : onSourceDialogClick(source) - } + )} + {message.fileUploads && message.fileUploads.length > 0 && ( +
+ {message.fileUploads.map((item, index) => { + return ( + <> + {item.mime.startsWith('image/') ? ( + + + + ) : ( + // eslint-disable-next-line jsx-a11y/media-has-caption + + )} + + ) + })} +
+ )} +
+ {/* Messages are being rendered in Markdown format */} + + ) : ( + + {children} + ) - })} -
- )} + } + }} + > + {message.message} +
- - + {message.fileAnnotations && ( +
+ {message.fileAnnotations.map((fileAnnotation, index) => { + return ( + + ) + })} +
+ )} + {message.sourceDocuments && ( +
+ {removeDuplicateURL(message).map((source, index) => { + const URL = + source.metadata && source.metadata.source + ? isValidURL(source.metadata.source) + : undefined + return ( + + URL ? onURLClick(source.metadata.source) : onSourceDialogClick(source) + } + /> + ) + })} +
+ )} +
+
) })}
@@ -800,11 +806,10 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { {previews && previews.length > 0 && ( {previews.map((item, index) => ( - <> + {item.mime.startsWith('image/') ? ( { backgroundColor: theme.palette.grey[500], flex: '0 0 auto' }} - key={index} variant='outlined' > { )} - + ))} )} @@ -854,7 +858,9 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { {recordingNotSupported && (
-

To record audio, use modern browsers like Chrome or Firefox that support audio recording.

+ + To record audio, use modern browsers like Chrome or Firefox that support audio recording. + + ) + } + }) + dispatch({ type: SET_CHATFLOW, chatflow: saveResp.data }) + } + onCancel() + } catch (error) { + const errorData = error.response.data || `${error.response.status}: ${error.response.statusText}` + enqueueSnackbar({ + message: `Failed to save Analytic Configuration: ${errorData}`, + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } + } + + const setValue = (value, providerName, inputParamName) => { + let newVal = {} + if (!Object.prototype.hasOwnProperty.call(speechToText, providerName)) { + newVal = { ...speechToText, [providerName]: {} } + } else { + newVal = { ...speechToText } + } + + newVal[providerName][inputParamName] = value + if (inputParamName === 'status' && value === true) { + //ensure that the others are turned off + speechToTextProviders.forEach((provider) => { + if (provider.name !== providerName) { + newVal[provider.name] = { ...speechToText[provider.name], status: false } + } + }) + } + setSpeechToText(newVal) + } + + const handleAccordionChange = (providerName) => (event, isExpanded) => { + const accordionProviders = { ...providerExpanded } + accordionProviders[providerName] = isExpanded + setProviderExpanded(accordionProviders) + } + + useEffect(() => { + if (dialogProps.chatflow && dialogProps.chatflow.speechToText) { + try { + setSpeechToText(JSON.parse(dialogProps.chatflow.speechToText)) + } catch (e) { + setSpeechToText({}) + console.error(e) + } + } + + return () => { + setSpeechToText({}) + setProviderExpanded({}) + } + }, [dialogProps]) + + useEffect(() => { + if (show) dispatch({ type: SHOW_CANVAS_DIALOG }) + else dispatch({ type: HIDE_CANVAS_DIALOG }) + return () => dispatch({ type: HIDE_CANVAS_DIALOG }) + }, [show, dispatch]) + + const component = show ? ( + + + Speech To Text Configuration + + + {speechToTextProviders.map((provider, index) => ( + + } aria-controls={provider.name} id={provider.name}> + + +
+ AI +
+
+ + {provider.url} + + } + /> + {speechToText[provider.name] && speechToText[provider.name].status && ( +
+
+ ON +
+ )} + + + + {provider.inputs.map((inputParam, index) => ( + +
+ + {inputParam.label} + {!inputParam.optional &&  *} + {inputParam.description && ( + + )} + +
+ {providerExpanded[provider.name] && inputParam.type === 'credential' && ( + setValue(newValue, provider.name, 'credentialId')} + /> + )} + {inputParam.type === 'boolean' && ( + setValue(newValue, provider.name, inputParam.name)} + value={ + speechToText[provider.name] + ? speechToText[provider.name][inputParam.name] + : inputParam.default ?? false + } + /> + )} + {providerExpanded[provider.name] && + (inputParam.type === 'string' || + inputParam.type === 'password' || + inputParam.type === 'number') && ( + setValue(newValue, provider.name, inputParam.name)} + value={ + speechToText[provider.name] + ? speechToText[provider.name][inputParam.name] + : inputParam.default ?? '' + } + /> + )} +
+ ))} +
+ + ))} + + + + Save + + +
+ ) : null + + return createPortal(component, portalElement) +} + +SpeechToTextDialog.propTypes = { + show: PropTypes.bool, + dialogProps: PropTypes.object, + onCancel: PropTypes.func +} + +export default SpeechToTextDialog diff --git a/packages/ui/src/views/canvas/CanvasHeader.js b/packages/ui/src/views/canvas/CanvasHeader.js index 85408cd8..a8589f48 100644 --- a/packages/ui/src/views/canvas/CanvasHeader.js +++ b/packages/ui/src/views/canvas/CanvasHeader.js @@ -28,6 +28,7 @@ import useApi from 'hooks/useApi' import { generateExportFlowData } from 'utils/genericHelper' import { uiBaseURL } from 'store/constant' import { SET_CHATFLOW } from 'store/actions' +import SpeechToTextDialog from '../../ui-component/dialog/SpeechToTextDialog' // ==============================|| CANVAS HEADER ||============================== // @@ -46,6 +47,8 @@ const CanvasHeader = ({ chatflow, handleSaveFlow, handleDeleteFlow, handleLoadFl const [apiDialogProps, setAPIDialogProps] = useState({}) const [analyseDialogOpen, setAnalyseDialogOpen] = useState(false) const [analyseDialogProps, setAnalyseDialogProps] = useState({}) + const [speechToAudioDialogOpen, setSpeechToAudioDialogOpen] = useState(false) + const [speechToAudioDialogProps, setSpeechToAudioialogProps] = useState({}) const [conversationStartersDialogOpen, setConversationStartersDialogOpen] = useState(false) const [conversationStartersDialogProps, setConversationStartersDialogProps] = useState({}) const [viewMessagesDialogOpen, setViewMessagesDialogOpen] = useState(false) @@ -71,6 +74,12 @@ const CanvasHeader = ({ chatflow, handleSaveFlow, handleDeleteFlow, handleLoadFl chatflow: chatflow }) setAnalyseDialogOpen(true) + } else if (setting === 'enableSpeechToText') { + setSpeechToAudioialogProps({ + title: 'Speech to Text', + chatflow: chatflow + }) + setSpeechToAudioDialogOpen(true) } else if (setting === 'viewMessages') { setViewMessagesDialogProps({ title: 'View Messages', @@ -385,6 +394,11 @@ const CanvasHeader = ({ chatflow, handleSaveFlow, handleDeleteFlow, handleLoadFl /> setAPIDialogOpen(false)} /> setAnalyseDialogOpen(false)} /> + setSpeechToAudioDialogOpen(false)} + /> Date: Tue, 30 Jan 2024 15:30:07 +0530 Subject: [PATCH 23/62] Fix error message when audio recording is not available --- .../ui/src/views/chatmessage/ChatMessage.css | 2 +- .../ui/src/views/chatmessage/ChatMessage.js | 71 ++++++++++--------- .../src/views/chatmessage/audio-recording.css | 2 +- 3 files changed, 41 insertions(+), 34 deletions(-) diff --git a/packages/ui/src/views/chatmessage/ChatMessage.css b/packages/ui/src/views/chatmessage/ChatMessage.css index 6742fbac..b7217909 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.css +++ b/packages/ui/src/views/chatmessage/ChatMessage.css @@ -151,7 +151,7 @@ .cloud-dialog { width: 100%; height: auto; - max-height: calc(100% - 72px); + max-height: calc(100% - 54px); overflow-y: scroll; display: flex; justify-content: center; diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index bea1acd1..fdfcb90f 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -287,7 +287,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { } const onRecordingCancelled = () => { - cancelAudioRecording() + if (!recordingNotSupported) cancelAudioRecording() setIsRecording(false) setRecordingNotSupported(false) } @@ -855,8 +855,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { )} 
{isRecording ? ( <> - {recordingNotSupported && ( -
+ {recordingNotSupported ? ( +
To record audio, use modern browsers like Chrome or Firefox that support audio recording. @@ -872,35 +872,42 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
+ ) : ( + +
+ + + + 00:00 + {isLoadingRecording && Sending...} +
+
+ + + + + + +
+
)} - -
- - - - 00:00 - {isLoadingRecording && Sending...} -
-
- - - - - - -
-
) : (
@@ -936,7 +943,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
                                     }
                                     endAdornment={
                                         <>
-                                            {isChatFlowAvailableForUploads && (
+                                            {isChatFlowAvailableForSpeech && (
                                                 onMicrophonePressed()}
diff --git a/packages/ui/src/views/chatmessage/audio-recording.css b/packages/ui/src/views/chatmessage/audio-recording.css
index 4b8e5566..c5d9fac9 100644
--- a/packages/ui/src/views/chatmessage/audio-recording.css
+++ b/packages/ui/src/views/chatmessage/audio-recording.css
@@ -115,6 +115,7 @@
 }
 .overlay {
     width: 100%;
+    height: 54px;
     /*targeting Chrome & Safari*/
     display: -webkit-flex;
     /*targeting IE10*/
@@ -123,7 +124,6 @@
     justify-content: center;
     /*horizontal centering*/
     align-items: center;
-    margin-bottom: 12px;
 }
 .overlay.hide {
     display: none;
From 1d122084b9f8a9d66c3058aba675b0d72bfcd76f Mon Sep 17 00:00:00 2001
From: Ilango
Date: Tue, 30 Jan 2024 15:54:14 +0530
Subject: [PATCH 24/62] Fix auto scroll on audio messages

---
 packages/ui/src/views/chatmessage/ChatMessage.js | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js
index fdfcb90f..1281889a 100644
--- a/packages/ui/src/views/chatmessage/ChatMessage.js
+++ b/packages/ui/src/views/chatmessage/ChatMessage.js
@@ -616,8 +616,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
                         })}
                     
                 )}
-
-
+
+
{messages && messages.map((message, index) => { return ( From 4604594c559e815a3cfd324b3f525fa78defc8da Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Tue, 30 Jan 2024 21:48:08 -0500 Subject: [PATCH 25/62] SpeechToText: Adding SpeechToText at the Chatflow level. --- .../credentials/AssemblyAI.credential.ts | 23 +++++++++ .../nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts | 36 +------------- .../ChatOpenAI/FlowiseChatOpenAI.ts | 21 +------- .../speechtotext/assemblyai/AssemblyAI.ts | 33 +++++++++++++ .../speechtotext/assemblyai/assemblyai.png | Bin 0 -> 8677 bytes packages/components/src/MultiModalUtils.ts | 43 ---------------- packages/server/src/NodesPool.ts | 2 +- packages/server/src/index.ts | 46 +++++++++++++++--- packages/server/src/utils/index.ts | 34 ++++++++++++- .../ui-component/dialog/SpeechToTextDialog.js | 10 ++-- 10 files changed, 136 insertions(+), 112 deletions(-) create mode 100644 packages/components/credentials/AssemblyAI.credential.ts create mode 100644 packages/components/nodes/speechtotext/assemblyai/AssemblyAI.ts create mode 100644 packages/components/nodes/speechtotext/assemblyai/assemblyai.png diff --git a/packages/components/credentials/AssemblyAI.credential.ts b/packages/components/credentials/AssemblyAI.credential.ts new file mode 100644 index 00000000..019cd7aa --- /dev/null +++ b/packages/components/credentials/AssemblyAI.credential.ts @@ -0,0 +1,23 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class AssemblyAIApi implements INodeCredential { + label: string + name: string + version: number + inputs: INodeParams[] + + constructor() { + this.label = 'AssemblyAI API' + this.name = 'assemblyAIApi' + this.version = 1.0 + this.inputs = [ + { + label: 'AssemblyAI Api Key', + name: 'assemblyAIApiKey', + type: 'password' + } + ] + } +} + +module.exports = { credClass: AssemblyAIApi } diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts index 9543f1ee..1cb09f3f 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts @@ -162,36 +162,6 @@ class ChatOpenAI_ChatModels implements INode { default: false, optional: true }, - { - label: 'Allow Speech to Text', - name: 'allowSpeechToText', - type: 'boolean', - default: false, - optional: true - }, - // TODO: only show when speechToText is true - { - label: 'Speech to Text Method', - description: 'How to turn audio into text', - name: 'speechToTextMode', - type: 'options', - options: [ - { - label: 'Transcriptions', - name: 'transcriptions', - description: - 'Transcribe audio into whatever language the audio is in. Default method when Speech to Text is turned on.' - }, - { - label: 'Translations', - name: 'translations', - description: 'Translate and transcribe the audio into english.' 
- } - ], - optional: false, - default: 'transcriptions', - additionalParams: true - }, { label: 'Image Resolution', description: 'This parameter controls the resolution in which the model views the image.', @@ -231,8 +201,6 @@ class ChatOpenAI_ChatModels implements INode { const baseOptions = nodeData.inputs?.baseOptions const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean - const allowSpeechToText = nodeData.inputs?.allowSpeechToText as boolean - const speechToTextMode = nodeData.inputs?.speechToTextMode as string const imageResolution = nodeData.inputs?.imageResolution as string const credentialData = await getCredentialData(nodeData.credential ?? '', options) @@ -270,9 +238,7 @@ class ChatOpenAI_ChatModels implements INode { const multiModal = { allowImageUploads: allowImageUploads ?? false, - allowSpeechToText: allowSpeechToText ?? false, - imageResolution, - speechToTextMode + imageResolution } model.multiModal = multiModal return model diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 8af9c4df..1bf4a286 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -7,8 +7,7 @@ import { ChatOpenAICallOptions } from '@langchain/openai/dist/chat_models' import { BaseMessageChunk, BaseMessageLike, HumanMessage, LLMResult } from 'langchain/schema' import { Callbacks } from '@langchain/core/callbacks/manager' import { ICommonObject, INodeData } from '../../../src' -import { addImagesToMessages, checkSpeechToText } from '../../../src/MultiModalUtils' -import { ChatPromptTemplate, PromptTemplate } from 'langchain/prompts' +import { addImagesToMessages } from '../../../src/MultiModalUtils' export class FlowiseChatOpenAI extends ChatOpenAI { multiModal: {} @@ -38,24 +37,6 @@ export class FlowiseChatOpenAI extends ChatOpenAI { private async injectMultiModalMessages(messages: BaseMessageLike[][]) { const nodeData = FlowiseChatOpenAI.chainNodeData const optionsData = FlowiseChatOpenAI.chainNodeOptions - let audioTrans = await checkSpeechToText(nodeData, optionsData) - if (audioTrans) { - if (messages.length > 0) { - const lastMessage = messages[0].pop() as HumanMessage - if (!nodeData.inputs?.prompt) { - lastMessage.content = audioTrans - } else if (nodeData.inputs?.prompt instanceof ChatPromptTemplate) { - lastMessage.content = audioTrans - } else if (nodeData.inputs?.prompt instanceof PromptTemplate) { - let prompt = nodeData.inputs?.prompt as PromptTemplate - let inputVar = prompt.inputVariables[0] - let formattedValues: any = {} - formattedValues[inputVar] = audioTrans - lastMessage.content = await prompt.format(formattedValues) - } - messages[0].push(lastMessage) - } - } const messageContent = addImagesToMessages(nodeData, optionsData) if (messageContent) { if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) { diff --git a/packages/components/nodes/speechtotext/assemblyai/AssemblyAI.ts b/packages/components/nodes/speechtotext/assemblyai/AssemblyAI.ts new file mode 100644 index 00000000..c5db6619 --- /dev/null +++ b/packages/components/nodes/speechtotext/assemblyai/AssemblyAI.ts @@ -0,0 +1,33 @@ +import { INode, INodeParams } from '../../../src/Interface' + +class AssemblyAI_SpeechToText implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + 
baseClasses: string[] + inputs?: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'AssemblyAI' + this.name = 'assemblyAI' + this.version = 1.0 + this.type = 'AssemblyAI' + this.icon = 'assemblyai.png' + this.category = 'SpeechToText' + this.baseClasses = [this.type] + this.inputs = [] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['assemblyAIApi'] + } + } +} + +module.exports = { nodeClass: AssemblyAI_SpeechToText } diff --git a/packages/components/nodes/speechtotext/assemblyai/assemblyai.png b/packages/components/nodes/speechtotext/assemblyai/assemblyai.png new file mode 100644 index 0000000000000000000000000000000000000000..8919cb18b9c68087fae8cdba5722d44629ab49e1 GIT binary patch literal 8677 zcmV5gcHx>W@019+cPE!C%8;-4V+}ppo+sgS9001FKNklhX~w6IoVB?7pFa3O{$RVFaXcxm+%n%jI&pTrQW(<#M@PE|<$y zv)Tjd8rR>bYumm*pTAi@d6pIC!Rs;NM`cod)FIk&!7PimPrs5sepz8 z!Ez*T;y8gtLV*tx2N&g!j09f~IysWpaIiVJ@FL8islXf{B8ROn&ys?=Gvh#da7D2N zEIi=~U4R^0SqP$BR@caSH*(@k!QQbL$iP@MEk3HEW;m|TI50UT&J0Zc1ZEUM0dPAMS=i{ zuGvbkZgz(nm> zRqZWh<#zAkLs`XScs53iM|>a>m%{t%A#(} zFqUB}Rv($asn@>I4Xh@wY#! z)in*mQ+UKJ!Y$S_%}8(}dFL&ihjsIArn^5@HI3Y?w5Fg4$i7N^7gL1bkQC-fkh%I< z1ZNOaRCuMbd#tcUhPEA&O7(Y)LoDk;PfQ|^NO#z0!VEz0VAcM3*u|GLh2lLHe;9YY zR)|O*eht4EWPF9lFWL85oB5&50RnlYyxtvBeXQRNQl@eZ=7z=7U}9j|ND%V-6V>yU zOacK#DoZW?+Jv~SdaMA8)c!NA&-jieJKL>aylX8G@D2hZTK00X@)igZWCo$p87DS|POA4qxF#OSgR z?8+UXW{Z5{MEIsA`(?^T0URWItVb#}Uud;)uSOZ|k_zU=X@rDBa)C3d?tF*1Ura=t z(`A>X&maH+5}!uunvq)*bT4x9=)NB#{ef55g{4?N8gcs$F zr5dr0A+1qs=iA038dII*o+VOUD}m4?(nr;u?+}k_ME9%qEbAT%Hqj2L+al60&~zBYQ0) z&yD=3?J4);FpcqUlV2Rw;S#_oCrHK-sNPurjDMlzvP%MC{-PHQz=EfIQLi}G+Y|xg z5q}^^ZtI&Y#K=an+ixLMtwjc)IPQ>nO!7Uwd`{PIA+zeWNQV$O?hqdIt&`op$=ZIu z<)zNf_a8ZDwA?lbL{UpYQEJTujr#xpTE|2_*{6pv!(wfI=EI+sY`lU=C}y6^>B zf2Yvjg;<4jocWI59AfHDQvt{AasL>oH*$+^PS)UhNt~Q z)J+fwnHKd$@7uSw+I#IX^PROqn3dNQX*dY+afOB#_Wlifbe8$v1Y(E~Sti&HJcC&k zk1QQ|$XYKn`pkR-G$6t}2gB$Cfhb>huBY!Kja^}nwm#l}hfz@}(n!QZTnAw@vZG+K z=&j+;YwF!fgq#-|%riL9l;a@yx^qtrW{20)`zvVe0=W%GYZV@EtL;bxipD|kb=S-N zZ?g56`A(13N_aaHW!TVy0{f49KOB=kK5I=JG3ihubBrj&YQ)s!<-1Vv9E5nwE5y5F zfQ_xy%y-&R(CPgMJx#d>KaN0b>>h%r`=_nDwe^tHv3GEgG^|9-bz9>gd5MVgyxP(4 zeRl<|F@ZoPRwHOYqKdjv#qxsG*FBb1vQuyQi|a`s4^P(`QEuT*BUHl0kHM`js)mR3 zX6x}0pKw7gbZAED!>eL>sjqujPSZ$ZX9cYZ2LZ`ZB}KyEJ;NjXzCsp#JERcC)}TaY zr4U&{VQQ<7ya&jPgK&M_qayDl*P*Qs(7@N32ruDj*_1d4yTgo!z@xvKZ-_!6!r%)V z7%B5+C31ko-5d5Kkuef1RsxFA2_p?rg-bTHo3m<39+~pu)?(%x;$hfjiz&@cZo%?V zmik&6_LM_E1+7s)fC35;>F-hF?<0_0yqo1Jd#|ic3R+V%ft?t4P)#`q0?Ed3XzTD$ zts{xdzpPhUHMYfQ6cWmWnOm>Oullm};fMw^++93)rk)9>dCLpc%gaarg85i+sr{Pi@@76yI8=&-2^PQmKn~0xnnOz71 zdEVCTzVPwv$JXZ^hqrLqmp(l#y)bMcOXpb_$h8Yw9})|2_-^7Pz|!Ke^kRwR>%E10 z^{oYogt&r((Iu1rlMC@|=IeXNwU?SNWvm1yqY?Frp5ZBhaEx)`VH85Ne~2+85WvWU z=3--fa>*>aA5b!q_G`W)>s6X$*Q^|saCtJ`%@GF<0jZ;l9b~@Kx|0wrm^~eQKF?>q zcJ=Be^PM9oKvNO(<>`Qhwe!LZYwxyKxAkG&d908*9awsuL|lzro0mvfv2^7mC*zEd z{XpSg6Y1iz$oPYXb`ls>3Zy*qRd>O(nO5_e@AO!!tdr!UCg=5QzDPyVD2?rvGv6Bq zG=h_$kGxLma$Z~W1;xR4aa*tXPHR+6c*uG(=RIlbIJrLBTkIv&RZ`Ldv8*F zq}_AAAb^Nky$(&p;;0e`nJ)?fiMU49cFh-o7#_F=aF{p(A?H1B>x(pzvlW!>C2c*d z$O9^(DGe|=6v?VPQXOy34QSL}q{)I-Py!L_*WUSBv7R#j4`0jYS$B;znE7Jq^oc@_h{Zvjw_YMp zxD6y)XLjq`OMP)YzyTcUwF~Cx1vziMt)md@@XOw}RnQuxo$C^c`>Xexw)FbUcM(l< zXsn<$*%XV>?oA4qD1p>xzJxt{(q7H?A9rWFr8W)(QLHhLILtY)Vc!2$mq?@BbT^G` z@P|Z8STeh3e^7f>C7BR1uOdY!Er!Rfp9@{|KnLX_v4*#@_3muTk^vqCJ^al3`j;4= za*_X9OTN8TLVG))kcoX#j`O*j^GzV7#(SOl1|JY+NI>VMuCD-6{uOeq2}{?T{8%Y~ z$PyyJOR150XR~$RMT9DajYu=LemZ)=Z{JCoH?@&?FlE5vkGlnGpa@o$kfHqptuBKhAVHmp 
zg{9Ao2#!uUf6L3zbn@+y01++>lxyNLbDeifR8TtDy+|6He=HKtc)F71QV}5jneX82 z*jr7$H$a3W0+FXO@Ip&LCw7GpVi;-sK#N=jA^GkpRQlaQbx=8rJDarH{R^u0=SxBZ zLJ(nsw=bD_v2}k>$o#J=ZQA-VQ^9BmVO}ndvF0!1Aoud`DBeUyWSqtJ^kzlNn_>lAXtj*p;8Yly^SETsuu9i_m`Hh_?PVFW3h zHEU=q`R>ud;PE{Z(#YFY0kLPkz>!{tkp}<9b@VWd;3;<9Vq7KjQU~|9WA8B1;F<6K za;tNExvJP3bG|}fAoE&Vx1u~`WN66NkNLBAlyDX(f6sW_En7iIq5Z>T-`0|Em-v7X z)08-BPtTD0f|d>+mZM{!~zzHvx4{r1=nRnp|9w8nh@zQ{; zhkrhOVIv+dFk<@(nHNMHA-vytpoXgd_)dSxg_omIx!FzpO6c1&Utabbzs%xIy|x}6 z>A)a8#O?|ivGk?uP)@!6^TVOmP)_SVIO&BlUCVLvN9|FU-n`7pd3x)pGK@N`I33_M1jFO zgdfoCJWi3EJ zq|68gqD~oMq!vWTyjm7C5h2W)iqvFfK&-9%-)Z#riMac)xvh7O97+-?0%RCqR_QoQWxdP`A}7a2k{VvxqM=iqZifh#?n|OQTL( z)lYw=N>eXy*`3I+UGfEx-BtxKSrd0`-9rRbJU)!uZ))qoQ3DbbCEVXnTPs^S z$=7FOT-r*N-r6wv?h_!tM^JLM6E5_10Ra)uM}GOw+i6mvEfFE}mZh-ADVC6R0Ri;s zyF%wzGczQv%^yR(plwp+V= zM1DPR5u*iCY`HlM(LIm@Ns78Ko+|((eTgwKa(YMy7l)Bd$|knn=hbRr@;$qIl+Cud zU1^nKNX?@p^^I)(8;Us4MvG#r-)&Y0VwS^5l4G=+Z15;|fBB7{Q&S{4OJ~_CEOMZA z@)^lfGA=XpTx%lkpZPkB_(tz@vT8;NfAxqHG>LeW?4+{H%b9P%(D8Ar0nS_M0WI_|cv;GDr1p6Y_e zmdUphT9CJBOwdJD4d=Wj`I5Ebotv`!%mpVaBGP9-BqtZ0d7&mwYt6i=I#wyfD8fSK z+A#SBse>g2LZr{9H)^zW&U|x&5?2gAbEWIz$rno(gv5*0MH1rgQYy&2AVL)|(VxEk z*8GaeH|h{eXks!Kn-VyQJ+o33AnAL)qB*Kq_aJkLHx^I60Kz7O33pZj$qB4fl6gtK z^7BU(pAs%WvTpJ{LP(z370d9#H8!xRA@imR#4MuLXk931S;f{nB!G&Hh(rfP*r{kn z>d3qx0wwX{ySg@XJcJ#}qKJ@rO`(bOb*Tk+^3>~Cg{6ycXt8UD5=Ru5khKsAtW!vU zCVq=u;Zc*N=SR7Q5T;J)CsqXGg_AFq?h|6pR3O7~nmRIXs_w*IBu`LgoG$EA)=9ou z0=iJaYlN-T(@+^8yIua&^9UhD8SoLNPV!v=kgN*oC`!~jfKyB6{nyrU5adugjb&wV zTkn++>=6Yenifo6uVuAl-c-RZRwvXLi3yIfysZZx5VR55*Pe8VJX}xa{YK=38azQP z>`KtU)?ryhdaZ?=0t9a;QuNNTB1`AY7cr2;VWhM}S=QD8#9z61!*XIj;I=Y|+lyEhw)RuWub)1_3NE#U?)Ix}GjCFMwaeN+57-)56-c%>SN9?>9 zxGB|7@tpZ8WQm|z%PyZMXEd!0lH?v6HSR$J!Hwyqe# z;8BKw-N|~k4j@Smzoe1eC8Xxeo9b{H&}3j@wLm9Z%O+ne{W^gX7A~}bmr4Oi$(QDB z5-k<*vkoFh2pOjZQ@tapHuDNwSBl6Ps&HM`);qA6N(r0@rK+^_n0(Pfwu}KYEFXnn zWzjR=K7Ib*n2ewV8_|-)R?T-C%E=cp@OMiH(yW3=xG4Y<*U6M{tsam+$=6|o0kc=4 zFkz36tPWs;j@aH`F`*m^b*qX_2Cg*6IR7PfWPt0W7Qppjpek6JTtsslQh zCJbJ4f?6;_@|`vFURkjOAxJSslj<`sh-5}AC(5@KG0a)J=11hnf|X{_;j7^*Q#l~J zAf#|e0TM=NVJ%z2*1IcWuu3rt14)X$XE9AgQv_8yT zjT*4@zh}Ou0wHMOp0Qvz-JF0R!oRwx1k5(kvy?V6uS&kS$=x$9?#1~deSze=k9?#~ zgA!xz>E&xu)$otCb1VXmnH{H!mOE>rc_}N3mBR-Xz8)c9>j0rqU@aaK4#3k zsZawwPy)RDzzNEr1dVOHeP(iPAoHd=Xh6tZTqh87`Y{V2=zxjXflr?qTA)`OnOE34fC&HK zumn;{-;M}#WM0@1B@-}v6T^l+q;Degrph8f2ya9Y=;PYQ%$XM+P{VV0;T~-3o3V7x zd>ukiWI_uwWM0Z)1X*+=4g*XZnK#?ItN~6;?i$C+$BdS4cZNm^leU=WOG|)!pZUfY z9cTt_$F{@EmhLzRix{J212@T+W-_lv#6~Tggjdm7UOw|xkrxG6uO6RsDQ9WnoYy8_ z?ctyc(L+|kQkhq>K!2pnwJA%_wmt|= zOzvVt3BwBG$7`AQYrKP?Q0n@urM1jU@)b4M8$pP*8M(VWykP0*;fewkD1ipN*I?!a z5#QA_CzcQ{f|-|+Z|G8SUsBId58b6)W%_m$Fmou0S#4jGCC7K~R2 zNxi+ys}XTo@lUk!zb3Z#;pxnq5P_3%g(m3HWadrP6~u50sTkt5dy3R3M3SMrZdp>e zq~6>ymauidGy|}LX3}z=Oul>Q`1@k;g|UP*4hV=iRbT|gn0L<|VJLhV{EZ2dJ>(5RaV$gMOZ z%+RN-O8b_clJDo~)?Zk$R2`;gUYg z0WsqEdQGzRF6xm!c$$zw%uCg{rT=T|pR-94CS~4lJRC|49hQ^#$-E@rPo!YB1|lJ; za8C=7wS*+^4G52NKj-4)FbR;%2Hn@@(JuZyEIlsc5Hb@d-6TLVBX{(`M9?Lt;rF!k zScfEmV&ro{M993<6-3a*-J8J4+X3U(yEl4z%>T z#qBb--hD|zNL&sj3<&Y{W|p3k?`Ldzh%aVOuoA{wIwE+93rds_f}gLq$h;r|4BA3? 
zduBueT8LTJ1WHunf=W@BHv#09d<`OC0Wq-RX@hW+01*a~E*>ZPfeE*7W9k2sYX-ZG z!!Qty?S%^i%|)pH|5vv)S9l;9N^-ZCQX?moob=+s^NiKj=4(R4)4~K}?A!BBtSdM4 z9B;au;L({Q(l6~bGm1aJf{Km|{Yc{(qM;D8Ddrj3c$IvVLQ zCQRczAyIz!pKh&I?2s8l1&W(Mg_&)=&Rg1caOvf(LrOA0YV$P*Gp@WL#DqS}eIU}y zASBom=SikWm*xu@))*OpVKRh?>SaqC5V51Qjerq(QF!%8vyEl`+kx>iQv5c3s}~<_ zK)fA&TzK=O{NjICY;9-->481wTiOl{&D!a_Vn=DE2#`e2@F_d5NZpQ@svt4QsG z9d63PmtH7}nolV;J{ST_KSyu{VlwUXs^)tUA_pTjBKQvD2mnz=(6+|wui1Od*Lg*o z@gnABa0Jcwtz5zrF^1KUGrTf(-b-t(RU9Mtu&{*%d%lZ!!N7|+Pd;K>68-76iO4u7 zEI1m7{3)mKA@6v>;+B#|`j?gq3wZ^s+#DbRhqz$}D$P3;@sYRl*5=E##FYOS`|C)@ z4U;8UGm<7B$pNX&7e+t@UhWaIyo~g-RJkk>mErD{w)58J%Qe@ie3O>Ld5#k?C79Aw z>ds573m34$3p&Pst~SyI5EwC(V2Y?TIZ51k|LLimuuy#CzP0&Cj~MZtt7}K@NOy?H zOux9DjS;fjNEa_cMi`>f;Uwceua0#CfLV?@fq6?D>1VY9UwoR>_c;Oiign1~2TS|n z4i~Vr`8yUSXd|2(1U#I~@=)jyAxvZ_GMM+XWL0-806BSyf;4GyQc`zb5g7s)ub9Cy z#fT?nF?TFevj78F`kYMBc|{vm00_*O+T}5F{dlu?tP>t=v@`8-BA+DhyqDIeg2LmV zWH90z{M_9gOY=d1_%vZ?PRcBiE{N#+4uDu@eE;(EPp#^y5Klk?cj_k{PU`bx`bd{p zM;Z_Z4JTN5dbnrd1}3Ov*L=jBlnJ)6(C!;9fLKJ#2^Re7!9Er`A%T^ntTA^3SaZ)?h1f7>ycVwt@ zePS?*5P2(JYJX8ZFLp-yecsp2_q|hc@yD&v##4yZ={ax;e43;4ijKHVjf-E|$#jHH z4whX?CfLS8OJD`D!OXIPI76nl#l5S`$rRgIXdfrQo5fMJU`TM0ZKj`%G?}aOg2rdG{hKia;B9h6Gb}BcU12Uc-}E&C_|XB_L^d!w~Gkk za^E22t@LG0@SMyEh&10H5r&jeaf8AOqdE3@b*%rOjyr}#d-5dyY@PRS`(~8lFbqRM z%Z_7wfZqSA3u}sq$w;zukdF*cPmbHczD&q`R#Xpx1Co*~5OOImuW&o>AiE3M z?y8ugq6d_Cm43eYn$IB?^x`+(&O6BKg&Z$-- { - const MODEL_NAME = 'whisper-1' - let input = undefined - let model = nodeData.inputs?.model as BaseChatModel - if (model instanceof ChatOpenAI && (model as any).multiModal) { - const multiModalConfig = (model as any).multiModal - if (options?.uploads) { - if (options.uploads.length === 1 && options.uploads[0].mime === 'audio/webm') { - const upload = options.uploads[0] - //special case, text input is empty, but we have an upload (recorded audio) - if (multiModalConfig.allowSpeechToText) { - const openAIClientOptions: ClientOptions = { - apiKey: model.openAIApiKey, - organization: model.organization - } - const openAIClient = new OpenAIClient(openAIClientOptions) - const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name) - - // as the image is stored in the server, read the file and convert it to base64 - const audio_file = fs.createReadStream(filePath) - - if (multiModalConfig.speechToTextMode === 'transcriptions') { - const transcription = await openAIClient.audio.transcriptions.create({ - file: audio_file, - model: MODEL_NAME - }) - return transcription.text - } else if (multiModalConfig.speechToTextMode === 'translations') { - const translation = await openAIClient.audio.translations.create({ - file: audio_file, - model: MODEL_NAME - }) - return translation.text - } - } else { - throw new Error('Speech to text is not selected, but found a recorded audio file. 
Please fix the chain.') - } - } - } - } - return input -} - export const addImagesToMessages = (nodeData: INodeData, options: ICommonObject): MessageContent => { const imageContent: MessageContent = [] let model = nodeData.inputs?.model as BaseChatModel diff --git a/packages/server/src/NodesPool.ts b/packages/server/src/NodesPool.ts index f4681d4a..8b01e63a 100644 --- a/packages/server/src/NodesPool.ts +++ b/packages/server/src/NodesPool.ts @@ -54,7 +54,7 @@ export class NodesPool { } } - const skipCategories = ['Analytic'] + const skipCategories = ['Analytic', 'SpeechToText'] if (!skipCategories.includes(newNodeInstance.category)) { this.componentNodes[newNodeInstance.name] = newNodeInstance } diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index e7816311..7558c689 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -46,7 +46,8 @@ import { getSessionChatHistory, getAllConnectedNodes, clearSessionMemory, - findMemoryNode + findMemoryNode, + convertedSpeechToText } from './utils' import { cloneDeep, omit, uniqWith, isEqual } from 'lodash' import { getDataSource } from './DataSource' @@ -58,7 +59,7 @@ import { Tool } from './database/entities/Tool' import { Assistant } from './database/entities/Assistant' import { ChatflowPool } from './ChatflowPool' import { CachePool } from './CachePool' -import { ICommonObject, IMessage, INodeOptionsValue, INodeParams, handleEscapeCharacters } from 'flowise-components' +import { ICommonObject, IMessage, INodeOptionsValue, INodeParams, handleEscapeCharacters, IFileUpload } from 'flowise-components' import { createRateLimiter, getRateLimiter, initializeRateLimiter } from './utils/rateLimit' import { addAPIKey, compareKeys, deleteAPIKey, getApiKey, getAPIKeys, updateAPIKey } from './utils/apiKey' import { sanitizeMiddleware } from './utils/XSS' @@ -473,6 +474,17 @@ export class App { const flowObj = JSON.parse(chatflow.flowData) const allowances: IUploadFileSizeAndTypes[] = [] let allowSpeechToText = false + if (chatflow.speechToText) { + const speechToTextProviders = JSON.parse(chatflow.speechToText) + for (const provider in speechToTextProviders) { + const providerObj = speechToTextProviders[provider] + if (providerObj.status) { + allowSpeechToText = true + break + } + } + } + let allowImageUploads = false flowObj.nodes.forEach((node: IReactFlowNode) => { if (uploadAllowedCategoryNodes.indexOf(node.data.category) > -1) { @@ -488,9 +500,6 @@ export class App { }) allowImageUploads = true } - if (param.name === 'allowSpeechToText' && node.data.inputs?.['allowSpeechToText']) { - allowSpeechToText = true - } }) } }) @@ -1602,7 +1611,8 @@ export class App { if (incomingInput.uploads) { // @ts-ignore - ;(incomingInput.uploads as any[]).forEach((upload: any) => { + const uploads = incomingInput.uploads as IFileUpload[] + for (const upload of uploads) { if (upload.type === 'file' || upload.type === 'audio') { const filename = upload.name const dir = path.join(getUserHome(), '.flowise', 'gptvision', chatId) @@ -1618,7 +1628,29 @@ export class App { upload.data = chatId upload.type = 'stored-file' } - }) + + if (upload.mime === 'audio/webm' && incomingInput.uploads?.length === 1) { + //speechToText + let speechToTextConfig: any = {} + if (chatflow.speechToText) { + const speechToTextProviders = JSON.parse(chatflow.speechToText) + for (const provider in speechToTextProviders) { + const providerObj = speechToTextProviders[provider] + if (providerObj.status) { + speechToTextConfig = providerObj + 
speechToTextConfig['name'] = provider + break + } + } + } + if (speechToTextConfig) { + const speechToTextResult = await convertedSpeechToText(upload.data, speechToTextConfig) + if (speechToTextResult) { + incomingInput.question = speechToTextResult + } + } + } + } } let isStreamValid = false diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index dafe612c..92f4d450 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -593,7 +593,6 @@ export const resolveVariables = ( } const paramsObj = flowNodeData[types] ?? {} - getParamValues(paramsObj) return flowNodeData @@ -1079,3 +1078,36 @@ export const getAllValuesFromJson = (obj: any): any[] => { extractValues(obj) return values } + +export const convertedSpeechToText = async (upload: any, speechToTextConfig: any) => { + // const MODEL_NAME = 'whisper-1' + if (speechToTextConfig) { + //special case, text input is empty, but we have an upload (recorded audio) + // const openAIClientOptions: ClientOptions = { + // apiKey: model.openAIApiKey, + // organization: model.organization + // } + // const openAIClient = new OpenAIClient(openAIClientOptions) + // const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name) + // + // // as the image is stored in the server, read the file and convert it to base64 + // const audio_file = fs.createReadStream(filePath) + // + // if (multiModalConfig.speechToTextMode === 'transcriptions') { + // const transcription = await openAIClient.audio.transcriptions.create({ + // file: audio_file, + // model: MODEL_NAME + // }) + // return transcription.text + // } else if (multiModalConfig.speechToTextMode === 'translations') { + // const translation = await openAIClient.audio.translations.create({ + // file: audio_file, + // model: MODEL_NAME + // }) + // return translation.text + // } + } else { + throw new Error('Speech to text is not selected, but found a recorded audio file. 
Please fix the chain.') + } + return undefined +} diff --git a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js index fa2b7a78..10b6f076 100644 --- a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js +++ b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js @@ -41,8 +41,8 @@ import chatflowsApi from 'api/chatflows' const speechToTextProviders = [ { - label: 'OpenAI Wisper', - name: 'openAIWisper', + label: 'OpenAI Whisper', + name: 'openAIWhisper', icon: openAISVG, url: 'https://platform.openai.com/docs/guides/speech-to-text', inputs: [ @@ -70,7 +70,7 @@ const speechToTextProviders = [ label: 'Connect Credential', name: 'credential', type: 'credential', - credentialNames: ['assemblyAiApi'] + credentialNames: ['assemblyAIApi'] }, { label: 'On/Off', @@ -101,7 +101,7 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { }) if (saveResp.data) { enqueueSnackbar({ - message: 'Analytic Configuration Saved', + message: 'Speech To Text Configuration Saved', options: { key: new Date().getTime() + Math.random(), variant: 'success', @@ -118,7 +118,7 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { } catch (error) { const errorData = error.response.data || `${error.response.status}: ${error.response.statusText}` enqueueSnackbar({ - message: `Failed to save Analytic Configuration: ${errorData}`, + message: `Failed to save Speech To Text Configuration: ${errorData}`, options: { key: new Date().getTime() + Math.random(), variant: 'error', From e81927ee132985a51aafb96dd67f174b371a7b08 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 31 Jan 2024 07:48:38 -0500 Subject: [PATCH 26/62] SpeechToText: Adding SpeechToText at the Chatflow level. --- .../ChatOpenAI/FlowiseChatOpenAI.ts | 2 +- packages/components/package.json | 1 + packages/components/src/MultiModalUtils.ts | 1 - packages/components/src/index.ts | 1 + packages/components/src/speechToText.ts | 49 +++++++++++++++++++ packages/server/src/index.ts | 19 +++++-- packages/server/src/utils/index.ts | 33 ------------- 7 files changed, 67 insertions(+), 39 deletions(-) create mode 100644 packages/components/src/speechToText.ts diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 1bf4a286..b25ec0c3 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -38,7 +38,7 @@ export class FlowiseChatOpenAI extends ChatOpenAI { const nodeData = FlowiseChatOpenAI.chainNodeData const optionsData = FlowiseChatOpenAI.chainNodeOptions const messageContent = addImagesToMessages(nodeData, optionsData) - if (messageContent) { + if (messageContent?.length) { if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) { const lastMessage = messages[0].pop() if (lastMessage instanceof HumanMessage) { diff --git a/packages/components/package.json b/packages/components/package.json index c90ea5cc..953a6c4c 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -40,6 +40,7 @@ "@upstash/redis": "^1.22.1", "@zilliz/milvus2-sdk-node": "^2.2.24", "apify-client": "^2.7.1", + "assemblyai": "^4.2.2", "axios": "1.6.2", "cheerio": "^1.0.0-rc.12", "chromadb": "^1.5.11", diff --git a/packages/components/src/MultiModalUtils.ts b/packages/components/src/MultiModalUtils.ts index 62e3513c..337cc105 100644 --- 
a/packages/components/src/MultiModalUtils.ts +++ b/packages/components/src/MultiModalUtils.ts @@ -1,6 +1,5 @@ import { ICommonObject, INodeData } from './Interface' import { BaseChatModel } from 'langchain/chat_models/base' -import { type ClientOptions, OpenAIClient } from '@langchain/openai' import { ChatOpenAI } from 'langchain/chat_models/openai' import path from 'path' import { getUserHome } from './utils' diff --git a/packages/components/src/index.ts b/packages/components/src/index.ts index ae2e380e..10cd1036 100644 --- a/packages/components/src/index.ts +++ b/packages/components/src/index.ts @@ -6,3 +6,4 @@ dotenv.config({ path: envPath, override: true }) export * from './Interface' export * from './utils' +export * from './speechToText' diff --git a/packages/components/src/speechToText.ts b/packages/components/src/speechToText.ts new file mode 100644 index 00000000..cc40cf21 --- /dev/null +++ b/packages/components/src/speechToText.ts @@ -0,0 +1,49 @@ +import { ICommonObject } from './Interface' +import { getCredentialData, getUserHome } from './utils' +import { type ClientOptions, OpenAIClient } from '@langchain/openai' +import fs from 'fs' +import path from 'path' +import { AssemblyAI } from 'assemblyai' + +export const convertSpeechToText = async (upload: any, speechToTextConfig: any, options: ICommonObject) => { + if (speechToTextConfig) { + const credentialId = speechToTextConfig.credentialId as string + const credentialData = await getCredentialData(credentialId ?? '', options) + const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name) + + // as the image is stored in the server, read the file and convert it to base64 + const audio_file = fs.createReadStream(filePath) + + if (speechToTextConfig.name === 'openAIWhisper') { + const openAIClientOptions: ClientOptions = { + apiKey: credentialData.openAIApiKey + } + const openAIClient = new OpenAIClient(openAIClientOptions) + + const transcription = await openAIClient.audio.transcriptions.create({ + file: audio_file, + model: 'whisper-1' + }) + if (transcription?.text) { + return transcription.text + } + } else if (speechToTextConfig.name === 'assemblyAiTranscribe') { + const client = new AssemblyAI({ + apiKey: credentialData.assemblyAIApiKey + }) + + const params = { + audio: audio_file, + speaker_labels: false + } + + const transcription = await client.transcripts.transcribe(params) + if (transcription?.text) { + return transcription.text + } + } + } else { + throw new Error('Speech to text is not selected, but found a recorded audio file. 
Please fix the chain.') + } + return undefined +} diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 7558c689..17689bcb 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -46,8 +46,7 @@ import { getSessionChatHistory, getAllConnectedNodes, clearSessionMemory, - findMemoryNode, - convertedSpeechToText + findMemoryNode } from './utils' import { cloneDeep, omit, uniqWith, isEqual } from 'lodash' import { getDataSource } from './DataSource' @@ -59,7 +58,15 @@ import { Tool } from './database/entities/Tool' import { Assistant } from './database/entities/Assistant' import { ChatflowPool } from './ChatflowPool' import { CachePool } from './CachePool' -import { ICommonObject, IMessage, INodeOptionsValue, INodeParams, handleEscapeCharacters, IFileUpload } from 'flowise-components' +import { + ICommonObject, + IMessage, + INodeOptionsValue, + INodeParams, + handleEscapeCharacters, + convertSpeechToText, + IFileUpload +} from 'flowise-components' import { createRateLimiter, getRateLimiter, initializeRateLimiter } from './utils/rateLimit' import { addAPIKey, compareKeys, deleteAPIKey, getApiKey, getAPIKeys, updateAPIKey } from './utils/apiKey' import { sanitizeMiddleware } from './utils/XSS' @@ -1644,7 +1651,11 @@ export class App { } } if (speechToTextConfig) { - const speechToTextResult = await convertedSpeechToText(upload.data, speechToTextConfig) + const options: ICommonObject = { + appDataSource: this.AppDataSource, + databaseEntities: databaseEntities + } + const speechToTextResult = await convertSpeechToText(upload, speechToTextConfig, options) if (speechToTextResult) { incomingInput.question = speechToTextResult } diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index 92f4d450..3ed00785 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -1078,36 +1078,3 @@ export const getAllValuesFromJson = (obj: any): any[] => { extractValues(obj) return values } - -export const convertedSpeechToText = async (upload: any, speechToTextConfig: any) => { - // const MODEL_NAME = 'whisper-1' - if (speechToTextConfig) { - //special case, text input is empty, but we have an upload (recorded audio) - // const openAIClientOptions: ClientOptions = { - // apiKey: model.openAIApiKey, - // organization: model.organization - // } - // const openAIClient = new OpenAIClient(openAIClientOptions) - // const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name) - // - // // as the image is stored in the server, read the file and convert it to base64 - // const audio_file = fs.createReadStream(filePath) - // - // if (multiModalConfig.speechToTextMode === 'transcriptions') { - // const transcription = await openAIClient.audio.transcriptions.create({ - // file: audio_file, - // model: MODEL_NAME - // }) - // return transcription.text - // } else if (multiModalConfig.speechToTextMode === 'translations') { - // const translation = await openAIClient.audio.translations.create({ - // file: audio_file, - // model: MODEL_NAME - // }) - // return translation.text - // } - } else { - throw new Error('Speech to text is not selected, but found a recorded audio file. Please fix the chain.') - } - return undefined -} From 5c8f48c2f135711956156661d69f3ac51eb3a412 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 31 Jan 2024 19:03:39 -0500 Subject: [PATCH 27/62] Multimodal: Image Uploads. 
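This commit moves upload eligibility from node categories to explicit node lists. For orientation, a hypothetical client-side read of the endpoint it touches — the three field names come from the response assembled in the diff below, while the fetch wiring and the baseURL/chatflowid variables are illustrative assumptions only:

    // Sketch: consume the capability flags of /api/v1/chatflows-uploads/:id
    const res = await fetch(`${baseURL}/api/v1/chatflows-uploads/${chatflowid}`)
    const { isUploadAllowed, allowSpeechToText, uploadFileSizeAndTypes } = await res.json()
    // isUploadAllowed        -> render the image-upload affordance
    // allowSpeechToText      -> render the microphone button
    // uploadFileSizeAndTypes -> validate file type and size before uploading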
--- .../ConversationalAgent/ConversationalAgent.ts | 3 +++ .../nodes/agents/MRKLAgentChat/MRKLAgentChat.ts | 2 ++ packages/server/src/index.ts | 12 +++++++----- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 7f857b1c..a8c709a2 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -9,6 +9,7 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } import { AgentExecutor } from '../../../src/agents' import { ChatConversationalAgent } from 'langchain/agents' import { renderTemplate } from '@langchain/core/prompts' +import { injectChainNodeData } from '../../../src/MultiModalUtils' const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. @@ -84,6 +85,8 @@ class ConversationalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory + injectChainNodeData(nodeData, options) + const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) const loggerHandler = new ConsoleCallbackHandler(options.logger) diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts index 19835e36..6ea8d67e 100644 --- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts @@ -5,6 +5,7 @@ import { Tool } from 'langchain/tools' import { BaseLanguageModel } from 'langchain/base_language' import { flatten } from 'lodash' import { additionalCallbacks } from '../../../src/handler' +import { injectChainNodeData } from '../../../src/MultiModalUtils' class MRKLAgentChat_Agents implements INode { label: string @@ -54,6 +55,7 @@ class MRKLAgentChat_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const executor = nodeData.instance as AgentExecutor + injectChainNodeData(nodeData, options) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 17689bcb..361c00cf 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -475,7 +475,8 @@ export class App { }) if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) - const uploadAllowedCategoryNodes = ['Chat Models'] + const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent'] + const uploadProcessingNodes = ['chatOpenAI'] try { const flowObj = JSON.parse(chatflow.flowData) @@ -494,26 +495,27 @@ export class App { let allowImageUploads = false flowObj.nodes.forEach((node: IReactFlowNode) => { - if (uploadAllowedCategoryNodes.indexOf(node.data.category) > -1) { + if (uploadProcessingNodes.indexOf(node.data.name) > -1) { logger.debug(`[server]: Found Eligible Node ${node.data.type}, Allowing Uploads.`) // there could be multiple components allowing uploads, so we check if it's already added // TODO: for now the maxUploadSize is hardcoded to 5MB, we need to add it to the node properties node.data.inputParams.map((param: INodeParams) => { - if (param.name === 'allowImageUploads' && 
node.data.inputs?.['allowImageUploads'] && !allowImageUploads) { + if (param.name === 'allowImageUploads' && node.data.inputs?.['allowImageUploads']) { allowances.push({ fileTypes: 'image/gif;image/jpeg;image/png;image/webp;'.split(';'), maxUploadSize: 5 }) - allowImageUploads = true } }) + } else if (uploadAllowedNodes.indexOf(node.data.name) > -1 && !allowImageUploads) { + allowImageUploads = true } }) return res.json({ allowSpeechToText: allowSpeechToText, - isUploadAllowed: allowances.length > 0, + isUploadAllowed: allowImageUploads, uploadFileSizeAndTypes: allowances }) } catch (e) { From aa5d1417a1bc9163136f6ae5ac806ad29787907e Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 31 Jan 2024 19:16:58 -0500 Subject: [PATCH 28/62] Multimodal: deleting uploads on delete of all chatmessages --- packages/server/src/index.ts | 7 ++++++- packages/server/src/utils/index.ts | 22 ++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 361c00cf..b7ddac51 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -46,7 +46,8 @@ import { getSessionChatHistory, getAllConnectedNodes, clearSessionMemory, - findMemoryNode + findMemoryNode, + deleteFolderRecursive } from './utils' import { cloneDeep, omit, uniqWith, isEqual } from 'lodash' import { getDataSource } from './DataSource' @@ -618,6 +619,10 @@ export class App { if (sessionId) deleteOptions.sessionId = sessionId if (chatType) deleteOptions.chatType = chatType + /* Delete all multimodal uploads corresponding to this chatflow */ + const directory = path.join(getUserHome(), '.flowise', 'gptvision', chatflowid) + deleteFolderRecursive(directory) + const results = await this.AppDataSource.getRepository(ChatMessage).delete(deleteOptions) return res.json(results) }) diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index 3ed00785..eb4e1936 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -1078,3 +1078,25 @@ export const getAllValuesFromJson = (obj: any): any[] => { extractValues(obj) return values } + +export const deleteFolderRecursive = (directory: string) => { + fs.readdir(directory, (error, files) => { + if (error) throw new Error('Could not read directory') + + files.forEach((file) => { + const file_path = path.join(directory, file) + + fs.stat(file_path, (error, stat) => { + if (error) throw new Error('File do not exist') + + if (!stat.isDirectory()) { + fs.unlink(file_path, (error) => { + if (error) throw new Error('Could not delete file') + }) + } else { + deleteFolderRecursive(file_path) + } + }) + }) + }) +} \ No newline at end of file From eab8c19f8c27adf62b1f20477c3518e55d5de046 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 31 Jan 2024 20:03:54 -0500 Subject: [PATCH 29/62] Multimodal: deleting uploads on delete of all chatmessages or chatflow --- packages/server/src/index.ts | 23 ++++++++++++++++----- packages/server/src/utils/index.ts | 32 ++++++++++++++++-------------- 2 files changed, 35 insertions(+), 20 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index b7ddac51..e655406d 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -431,6 +431,15 @@ export class App { // Delete chatflow via id this.app.delete('/api/v1/chatflows/:id', async (req: Request, res: Response) => { const results = await this.AppDataSource.getRepository(ChatFlow).delete({ id: req.params.id }) + + try 
{ + /* Delete all multimodal uploads corresponding to this chatflow */ + const directory = path.join(getUserHome(), '.flowise', 'gptvision', req.params.id) + deleteFolderRecursive(directory) + } catch (e) { + logger.error(`[server]: Error deleting multimodal uploads: ${e}`) + } + return res.json(results) }) @@ -619,9 +628,13 @@ export class App { if (sessionId) deleteOptions.sessionId = sessionId if (chatType) deleteOptions.chatType = chatType - /* Delete all multimodal uploads corresponding to this chatflow */ - const directory = path.join(getUserHome(), '.flowise', 'gptvision', chatflowid) - deleteFolderRecursive(directory) + try { + /* Delete all multimodal uploads corresponding to this chatflow */ + const directory = path.join(getUserHome(), '.flowise', 'gptvision', chatflowid) + deleteFolderRecursive(directory) + } catch (e) { + logger.error(`[server]: Error deleting multimodal uploads: ${e}`) + } const results = await this.AppDataSource.getRepository(ChatMessage).delete(deleteOptions) return res.json(results) @@ -1629,7 +1642,7 @@ export class App { for (const upload of uploads) { if (upload.type === 'file' || upload.type === 'audio') { const filename = upload.name - const dir = path.join(getUserHome(), '.flowise', 'gptvision', chatId) + const dir = path.join(getUserHome(), '.flowise', 'gptvision', chatflowid) if (!fs.existsSync(dir)) { fs.mkdirSync(dir, { recursive: true }) } @@ -1639,7 +1652,7 @@ export class App { //writes data to a file, replacing the file if it already exists. fs.writeFileSync(filePath, bf) // don't need to store the file contents in chatmessage, just the filename and chatId - upload.data = chatId + upload.data = chatflowid upload.type = 'stored-file' } diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index eb4e1936..2b6a22bd 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -1080,23 +1080,25 @@ export const getAllValuesFromJson = (obj: any): any[] => { } export const deleteFolderRecursive = (directory: string) => { - fs.readdir(directory, (error, files) => { - if (error) throw new Error('Could not read directory') + if (fs.existsSync(directory)) { + fs.readdir(directory, (error, files) => { + if (error) throw new Error('Could not read directory') - files.forEach((file) => { - const file_path = path.join(directory, file) + files.forEach((file) => { + const file_path = path.join(directory, file) - fs.stat(file_path, (error, stat) => { - if (error) throw new Error('File do not exist') + fs.stat(file_path, (error, stat) => { + if (error) throw new Error('File do not exist') - if (!stat.isDirectory()) { - fs.unlink(file_path, (error) => { - if (error) throw new Error('Could not delete file') - }) - } else { - deleteFolderRecursive(file_path) - } + if (!stat.isDirectory()) { + fs.unlink(file_path, (error) => { + if (error) throw new Error('Could not delete file') + }) + } else { + deleteFolderRecursive(file_path) + } + }) }) }) - }) -} \ No newline at end of file + } +} From a219efc91331b083c8a20090881cee3b2466336c Mon Sep 17 00:00:00 2001 From: Henry Heng Date: Fri, 2 Feb 2024 13:37:33 +0000 Subject: [PATCH 30/62] Rename MultiModalUtils.ts to multiModalUtils.ts --- .../components/src/{MultiModalUtils.ts => multiModalUtils.ts} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename packages/components/src/{MultiModalUtils.ts => multiModalUtils.ts} (100%) diff --git a/packages/components/src/MultiModalUtils.ts b/packages/components/src/multiModalUtils.ts similarity index 100% rename from 
packages/components/src/MultiModalUtils.ts rename to packages/components/src/multiModalUtils.ts From c5bd4d41682348ac2b45871a59be3c8a52eb99f8 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 2 Feb 2024 13:52:49 +0000 Subject: [PATCH 31/62] address configuration fix and add BLOB_STORAGE_PATH env variable --- CONTRIBUTING-ZH.md | 1 + CONTRIBUTING.md | 1 + docker/.env.example | 1 + docker/README.md | 1 + docker/docker-compose.yml | 1 + .../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts | 2 +- packages/server/.env.example | 1 + packages/server/src/commands/start.ts | 4 ++++ 8 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING-ZH.md b/CONTRIBUTING-ZH.md index 7e35d194..b96cb86f 100644 --- a/CONTRIBUTING-ZH.md +++ b/CONTRIBUTING-ZH.md @@ -124,6 +124,7 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package | FLOWISE_USERNAME | 登录用户名 | 字符串 | | | FLOWISE_PASSWORD | 登录密码 | 字符串 | | | DEBUG | 打印组件的日志 | 布尔值 | | +| BLOB_STORAGE_PATH | 存储位置 | 字符串 | `your-home-dir/.flowise/storage` | | LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` | | LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` | | APIKEY_PATH | 存储 API 密钥的位置 | 字符串 | `your-path/Flowise/packages/server` | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 25a27e84..4d90a695 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -128,6 +128,7 @@ Flowise support different environment variables to configure your instance. You | FLOWISE_USERNAME | Username to login | String | | | FLOWISE_PASSWORD | Password to login | String | | | DEBUG | Print logs from components | Boolean | | +| BLOB_STORAGE_PATH | Location where uploaded files are stored | String | `your-home-dir/.flowise/storage` | | LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` | | LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` | | APIKEY_PATH | Location where api keys are saved | String | `your-path/Flowise/packages/server` | diff --git a/docker/.env.example b/docker/.env.example index a4beaf8a..18415673 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -3,6 +3,7 @@ DATABASE_PATH=/root/.flowise APIKEY_PATH=/root/.flowise SECRETKEY_PATH=/root/.flowise LOG_PATH=/root/.flowise/logs +BLOB_STORAGE_PATH=/root/.flowise/storage # CORS_ORIGINS="*" # IFRAME_ORIGINS="*" diff --git a/docker/README.md b/docker/README.md index 11b29cf3..49ce57c0 100644 --- a/docker/README.md +++ b/docker/README.md @@ -31,5 +31,6 @@ If you like to persist your data (flows, logs, apikeys, credentials), set these - APIKEY_PATH=/root/.flowise - LOG_PATH=/root/.flowise/logs - SECRETKEY_PATH=/root/.flowise +- BLOB_STORAGE_PATH=/root/.flowise/storage Flowise also support different environment variables to configure your instance. 
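The docs above give the default for BLOB_STORAGE_PATH as your-home-dir/.flowise/storage; the code that resolves the variable is not part of this diff. A minimal sketch under that assumption:

    // Assumption: fall back to the documented default when BLOB_STORAGE_PATH is unset.
    import os from 'os'
    import path from 'path'

    const getStoragePath = (): string => process.env.BLOB_STORAGE_PATH ?? path.join(os.homedir(), '.flowise', 'storage')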
Read [more](https://docs.flowiseai.com/environment-variables) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 71bcfcfb..cb45f37c 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -25,6 +25,7 @@ services: - FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE} - LOG_LEVEL=${LOG_LEVEL} - LOG_PATH=${LOG_PATH} + - BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH} - DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY} ports: - '${PORT}:${PORT}' diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 562cc9d9..de5739f5 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -22,7 +22,7 @@ export class ChatOpenAI extends LangchainChatOpenAI { /** @deprecated */ configuration?: ClientOptions & LegacyOpenAIInput ) { - super(fields) + super(fields, configuration) this.multiModalOption = fields?.multiModalOption this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo' this.configuredMaxToken = fields?.maxTokens diff --git a/packages/server/.env.example b/packages/server/.env.example index ebc59cf3..e2eb833f 100644 --- a/packages/server/.env.example +++ b/packages/server/.env.example @@ -5,6 +5,7 @@ PORT=3000 # APIKEY_PATH=/your_api_key_path/.flowise # SECRETKEY_PATH=/your_api_key_path/.flowise # LOG_PATH=/your_log_path/.flowise/logs +# BLOB_STORAGE_PATH=/your_database_path/.flowise/storage # NUMBER_OF_PROXIES= 1 diff --git a/packages/server/src/commands/start.ts b/packages/server/src/commands/start.ts index dfb20766..88713804 100644 --- a/packages/server/src/commands/start.ts +++ b/packages/server/src/commands/start.ts @@ -22,6 +22,7 @@ export default class Start extends Command { CORS_ORIGINS: Flags.string(), IFRAME_ORIGINS: Flags.string(), DEBUG: Flags.string(), + BLOB_STORAGE_PATH: Flags.string(), APIKEY_PATH: Flags.string(), SECRETKEY_PATH: Flags.string(), FLOWISE_SECRETKEY_OVERWRITE: Flags.string(), @@ -91,6 +92,9 @@ export default class Start extends Command { if (flags.FLOWISE_PASSWORD) process.env.FLOWISE_PASSWORD = flags.FLOWISE_PASSWORD if (flags.APIKEY_PATH) process.env.APIKEY_PATH = flags.APIKEY_PATH + // Storage + if (flags.BLOB_STORAGE_PATH) process.env.BLOB_STORAGE_PATH = flags.BLOB_STORAGE_PATH + // Credentials if (flags.SECRETKEY_PATH) process.env.SECRETKEY_PATH = flags.SECRETKEY_PATH if (flags.FLOWISE_SECRETKEY_OVERWRITE) process.env.FLOWISE_SECRETKEY_OVERWRITE = flags.FLOWISE_SECRETKEY_OVERWRITE From a4131dc21b1ed2f5a79ffd31f4e48fc53f2d6eae Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 2 Feb 2024 15:44:05 +0000 Subject: [PATCH 32/62] add fixes for chaining --- packages/server/src/index.ts | 5 ++++- packages/server/src/utils/index.ts | 12 ++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 02d6bf43..998801eb 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1954,7 +1954,10 @@ export class App { chatflowid, this.AppDataSource, incomingInput?.overrideConfig, - this.cachePool + this.cachePool, + false, + undefined, + incomingInput.uploads ) const nodeToExecute = diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index 332819b8..31a5a5f4 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -27,7 +27,8 @@ import { ICommonObject, IDatabaseEntity, 
IMessage, - FlowiseMemory + FlowiseMemory, + IFileUpload } from 'flowise-components' import { randomBytes } from 'crypto' import { AES, enc } from 'crypto-js' @@ -279,7 +280,8 @@ export const buildLangchain = async ( overrideConfig?: ICommonObject, cachePool?: CachePool, isUpsert?: boolean, - stopNodeId?: string + stopNodeId?: string, + uploads?: IFileUpload[] ) => { const flowNodes = cloneDeep(reactFlowNodes) @@ -325,7 +327,8 @@ export const buildLangchain = async ( appDataSource, databaseEntities, cachePool, - dynamicVariables + dynamicVariables, + uploads }) logger.debug(`[server]: Finished upserting ${reactFlowNode.data.label} (${reactFlowNode.data.id})`) break @@ -340,7 +343,8 @@ export const buildLangchain = async ( appDataSource, databaseEntities, cachePool, - dynamicVariables + dynamicVariables, + uploads }) // Save dynamic variables From 041bfea94077d808de18a95bfb99bb161c69b573 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 2 Feb 2024 20:07:50 +0000 Subject: [PATCH 33/62] add more params --- packages/components/src/speechToText.ts | 5 ++- .../ui-component/dialog/SpeechToTextDialog.js | 39 +++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/packages/components/src/speechToText.ts b/packages/components/src/speechToText.ts index 6bb11e95..8524b525 100644 --- a/packages/components/src/speechToText.ts +++ b/packages/components/src/speechToText.ts @@ -21,7 +21,10 @@ export const convertSpeechToText = async (upload: IFileUpload, speechToTextConfi const transcription = await openAIClient.audio.transcriptions.create({ file: audio_file, - model: 'whisper-1' + model: 'whisper-1', + language: speechToTextConfig?.language, + temperature: speechToTextConfig?.temperature ? parseFloat(speechToTextConfig.temperature) : undefined, + prompt: speechToTextConfig?.prompt }) if (transcription?.text) { return transcription.text diff --git a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js index 10b6f076..9fc11a72 100644 --- a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js +++ b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js @@ -29,6 +29,7 @@ import { TooltipWithParser } from 'ui-component/tooltip/TooltipWithParser' import { SwitchInput } from 'ui-component/switch/Switch' import { Input } from 'ui-component/input/Input' import { StyledButton } from 'ui-component/button/StyledButton' +import { Dropdown } from 'ui-component/dropdown/Dropdown' import openAISVG from 'assets/images/openai.svg' import assemblyAIPng from 'assets/images/assemblyai.png' @@ -52,6 +53,31 @@ const speechToTextProviders = [ type: 'credential', credentialNames: ['openAIApi'] }, + { + label: 'Language', + name: 'language', + type: 'string', + description: + 'The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.', + placeholder: 'en', + optional: true + }, + { + label: 'Prompt', + name: 'prompt', + type: 'string', + rows: 4, + description: `An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.`, + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + description: `The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.`, + optional: true + }, { label: 'On/Off', name: 'status', @@ -306,6 +332,19 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { } /> )} + + {providerExpanded[provider.name] && inputParam.type === 'options' && ( + setValue(newValue, provider.name, inputParam.name)} + value={ + speechToText[provider.name] + ? speechToText[provider.name][inputParam.name] + : inputParam.default ?? 'choose an option' + } + /> + )} ))} From c504f91752a401ccefddcc7b833c0ca6fd3a258a Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Fri, 2 Feb 2024 17:45:12 -0500 Subject: [PATCH 34/62] Multimodal: guard to check for nodeData before image message insertion. --- .../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index de5739f5..9033b27f 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -33,7 +33,9 @@ export class ChatOpenAI extends LangchainChatOpenAI { } async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise { - await this.injectMultiModalMessages(messages) + if (ChatOpenAI.chainNodeData && ChatOpenAI.chainNodeOptions) { + await this.injectMultiModalMessages(messages) + } return super.generate(messages, options, callbacks) } From 8c494cf17e5f853d70fb56d1a9937d383a8a7455 Mon Sep 17 00:00:00 2001 From: Ilango Date: Tue, 6 Feb 2024 12:59:40 +0530 Subject: [PATCH 35/62] Fix UI issues - chat window height, image & audio styling, and image + audio not sending together --- .../ui-component/cards/StarterPromptsCard.css | 3 -- .../ui-component/cards/StarterPromptsCard.js | 5 ++- .../src/views/chatmessage/ChatExpandDialog.js | 14 +++++-- .../ui/src/views/chatmessage/ChatMessage.css | 11 ++++-- .../ui/src/views/chatmessage/ChatMessage.js | 38 +++++++++---------- .../ui/src/views/chatmessage/ChatPopUp.js | 5 ++- 6 files changed, 45 insertions(+), 31 deletions(-) diff --git a/packages/ui/src/ui-component/cards/StarterPromptsCard.css b/packages/ui/src/ui-component/cards/StarterPromptsCard.css index 028b8b34..8fc6c07c 100644 --- a/packages/ui/src/ui-component/cards/StarterPromptsCard.css +++ b/packages/ui/src/ui-component/cards/StarterPromptsCard.css @@ -1,6 +1,4 @@ .button-container { - position: absolute; - z-index: 1000; display: flex; overflow-x: auto; -webkit-overflow-scrolling: touch; /* For momentum scroll on mobile devices */ @@ -9,5 +7,4 @@ .button { flex: 0 0 auto; /* Don't grow, don't shrink, base width on content */ - margin: 5px; /* Adjust as needed for spacing between buttons */ } diff --git a/packages/ui/src/ui-component/cards/StarterPromptsCard.js b/packages/ui/src/ui-component/cards/StarterPromptsCard.js index cfec4ba4..bb3fbdfa 100644 --- a/packages/ui/src/ui-component/cards/StarterPromptsCard.js +++ b/packages/ui/src/ui-component/cards/StarterPromptsCard.js @@ -5,7 +5,10 @@ import './StarterPromptsCard.css' const StarterPromptsCard = ({ isGrid, starterPrompts, sx, onPromptClick }) => { return ( - + {starterPrompts.map((sp, index) => ( onPromptClick(sp.prompt, e)} /> ))} diff --git a/packages/ui/src/views/chatmessage/ChatExpandDialog.js 
b/packages/ui/src/views/chatmessage/ChatExpandDialog.js index e2044ea3..9b526e56 100644 --- a/packages/ui/src/views/chatmessage/ChatExpandDialog.js +++ b/packages/ui/src/views/chatmessage/ChatExpandDialog.js @@ -7,7 +7,7 @@ import { ChatMessage } from './ChatMessage' import { StyledButton } from 'ui-component/button/StyledButton' import { IconEraser } from '@tabler/icons' -const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel }) => { +const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel, previews, setPreviews }) => { const portalElement = document.getElementById('portal') const customization = useSelector((state) => state.customization) @@ -47,7 +47,13 @@ const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel }) => { className='cloud-dialog-wrapper' sx={{ display: 'flex', justifyContent: 'flex-end', flexDirection: 'column', p: 0 }} > - + ) : null @@ -59,7 +65,9 @@ ChatExpandDialog.propTypes = { show: PropTypes.bool, dialogProps: PropTypes.object, onClear: PropTypes.func, - onCancel: PropTypes.func + onCancel: PropTypes.func, + previews: PropTypes.array, + setPreviews: PropTypes.func } export default ChatExpandDialog diff --git a/packages/ui/src/views/chatmessage/ChatMessage.css b/packages/ui/src/views/chatmessage/ChatMessage.css index 91bb9efa..e1646c80 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.css +++ b/packages/ui/src/views/chatmessage/ChatMessage.css @@ -115,14 +115,14 @@ padding: 12px; } -.cloud-wrapper, -.cloud-dialog-wrapper { +.cloud-wrapper { width: 400px; - height: calc(100vh - 260px); + height: calc(100vh - 180px); } .cloud-dialog-wrapper { width: 100%; + height: calc(100vh - 120px); } .cloud-wrapper > div, @@ -198,3 +198,8 @@ z-index: 2000; /* Ensure it's above other content */ border: 2px dashed #0094ff; /* Example style */ } + +.center audio { + height: 100%; + border-radius: 0; +} diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 1a9e6d35..89fd941a 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -58,7 +58,7 @@ const messageImageStyle = { objectFit: 'cover' } -export const ChatMessage = ({ open, chatflowid, isDialog }) => { +export const ChatMessage = ({ open, chatflowid, isDialog, previews, setPreviews }) => { const theme = useTheme() const customization = useSelector((state) => state.customization) @@ -90,7 +90,6 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { // drag & drop and file input const fileUploadRef = useRef(null) const [isChatFlowAvailableForUploads, setIsChatFlowAvailableForUploads] = useState(false) - const [previews, setPreviews] = useState([]) const [isDragActive, setIsDragActive] = useState(false) // recording @@ -353,7 +352,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { if (e) e.preventDefault() if (!promptStarterInput && userInput.trim() === '') { - if (!(previews.length === 1 && previews[0].type === 'audio')) { + const containsAudio = previews.filter((item) => item.type === 'audio').length > 0 + if (!(previews.length > 1 && containsAudio)) { return } } @@ -584,7 +584,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { useEffect(() => { // wait for audio recording to load and then send - if (previews.length === 1 && previews[0].type === 'audio') { + const containsAudio = previews.filter((item) => item.type === 'audio').length > 0 + if (previews.length > 1 && containsAudio) { setIsRecording(false) 
setRecordingNotSupported(false) handlePromptClick('') @@ -669,7 +670,8 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { display: 'flex', flexWrap: 'wrap', flexDirection: 'row', - width: '100%' + width: '100%', + gap: '4px' }} > {message.fileUploads.map((item, index) => { @@ -788,23 +790,22 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
-
- {messages && messages.length === 1 && ( + {messages && messages.length === 1 && starterPrompts.length > 0 && ( +
0 ? 70 : 0 }} starterPrompts={starterPrompts || []} onPromptClick={handlePromptClick} isGrid={isDialog} /> - )} - -
+
+ )}
{previews && previews.length > 0 && ( - + {previews.map((item, index) => ( {item.mime.startsWith('image/') ? ( @@ -827,23 +828,18 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { ) : ( - + handleDeletePreview(item)} size='small'> @@ -993,5 +989,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => { ChatMessage.propTypes = { open: PropTypes.bool, chatflowid: PropTypes.string, - isDialog: PropTypes.bool + isDialog: PropTypes.bool, + previews: PropTypes.array, + setPreviews: PropTypes.func } diff --git a/packages/ui/src/views/chatmessage/ChatPopUp.js b/packages/ui/src/views/chatmessage/ChatPopUp.js index 91ba73e2..74d4b908 100644 --- a/packages/ui/src/views/chatmessage/ChatPopUp.js +++ b/packages/ui/src/views/chatmessage/ChatPopUp.js @@ -35,6 +35,7 @@ export const ChatPopUp = ({ chatflowid }) => { const [open, setOpen] = useState(false) const [showExpandDialog, setShowExpandDialog] = useState(false) const [expandDialogProps, setExpandDialogProps] = useState({}) + const [previews, setPreviews] = useState([]) const anchorRef = useRef(null) const prevOpen = useRef(open) @@ -199,7 +200,7 @@ export const ChatPopUp = ({ chatflowid }) => { boxShadow shadow={theme.shadows[16]} > - + @@ -211,6 +212,8 @@ export const ChatPopUp = ({ chatflowid }) => { dialogProps={expandDialogProps} onClear={clearChat} onCancel={() => setShowExpandDialog(false)} + previews={previews} + setPreviews={setPreviews} > ) From 9072e694ca2c01f079088306f88f9e4565850075 Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 12 Feb 2024 13:19:17 +0530 Subject: [PATCH 36/62] Return uploads config in public chatbot config endpoint --- packages/server/src/index.ts | 130 +++++++++++++++++++---------------- 1 file changed, 70 insertions(+), 60 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 998801eb..218ae100 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -404,8 +404,9 @@ export class App { if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) if (chatflow.chatbotConfig) { try { + const uploadsConfig = await this.areUploadsEnabled(req.params.id) const parsedConfig = JSON.parse(chatflow.chatbotConfig) - return res.json(parsedConfig) + return res.json({ ...parsedConfig, ...uploadsConfig }) } catch (e) { return res.status(500).send(`Error parsing Chatbot Config for Chatflow ${req.params.id}`) } @@ -521,66 +522,9 @@ export class App { // Check if chatflow valid for uploads this.app.get('/api/v1/chatflows-uploads/:id', async (req: Request, res: Response) => { - const chatflow = await this.AppDataSource.getRepository(ChatFlow).findOneBy({ - id: req.params.id - }) - if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) - - const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent'] - const uploadProcessingNodes = ['chatOpenAI'] - try { - const flowObj = JSON.parse(chatflow.flowData) - const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = [] - - let isSpeechToTextEnabled = false - if (chatflow.speechToText) { - const speechToTextProviders = JSON.parse(chatflow.speechToText) - for (const provider in speechToTextProviders) { - const providerObj = speechToTextProviders[provider] - if (providerObj.status) { - isSpeechToTextEnabled = true - break - } - } - } - - let isImageUploadAllowed = false - const nodes: IReactFlowNode[] = flowObj.nodes - - /* - * Condition for isImageUploadAllowed - * 1.) one of the uploadAllowedNodes exists - * 2.) 
one of the uploadProcessingNodes exists + allowImageUploads is ON - */ - if (!nodes.some((node) => uploadAllowedNodes.includes(node.data.name))) { - return res.json({ - isSpeechToTextEnabled, - isImageUploadAllowed: false, - imgUploadSizeAndTypes - }) - } - - nodes.forEach((node: IReactFlowNode) => { - if (uploadProcessingNodes.indexOf(node.data.name) > -1) { - // TODO: for now the maxUploadSize is hardcoded to 5MB, we need to add it to the node properties - node.data.inputParams.map((param: INodeParams) => { - if (param.name === 'allowImageUploads' && node.data.inputs?.['allowImageUploads']) { - imgUploadSizeAndTypes.push({ - fileTypes: 'image/gif;image/jpeg;image/png;image/webp;'.split(';'), - maxUploadSize: 5 - }) - isImageUploadAllowed = true - } - }) - } - }) - - return res.json({ - isSpeechToTextEnabled, - isImageUploadAllowed, - imgUploadSizeAndTypes - }) + const uploadsConfig = await this.areUploadsEnabled(req.params.id) + return res.json(uploadsConfig) } catch (e) { return res.status(500).send(e) } @@ -1542,6 +1486,72 @@ export class App { return false } + /** + * Method that checks if uploads are enabled in the chatflow + * @param {string} chatflowid + */ + async areUploadsEnabled(chatflowid: string): Promise { + const chatflow = await this.AppDataSource.getRepository(ChatFlow).findOneBy({ + id: chatflowid + }) + if (!chatflow) return `Chatflow ${chatflowid} not found` + + const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent'] + const uploadProcessingNodes = ['chatOpenAI'] + + const flowObj = JSON.parse(chatflow.flowData) + const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = [] + + let isSpeechToTextEnabled = false + if (chatflow.speechToText) { + const speechToTextProviders = JSON.parse(chatflow.speechToText) + for (const provider in speechToTextProviders) { + const providerObj = speechToTextProviders[provider] + if (providerObj.status) { + isSpeechToTextEnabled = true + break + } + } + } + + let isImageUploadAllowed = false + const nodes: IReactFlowNode[] = flowObj.nodes + + /* + * Condition for isImageUploadAllowed + * 1.) one of the uploadAllowedNodes exists + * 2.) one of the uploadProcessingNodes exists + allowImageUploads is ON + */ + if (!nodes.some((node) => uploadAllowedNodes.includes(node.data.name))) { + return { + isSpeechToTextEnabled, + isImageUploadAllowed: false, + imgUploadSizeAndTypes + } + } + + nodes.forEach((node: IReactFlowNode) => { + if (uploadProcessingNodes.indexOf(node.data.name) > -1) { + // TODO: for now the maxUploadSize is hardcoded to 5MB, we need to add it to the node properties + node.data.inputParams.map((param: INodeParams) => { + if (param.name === 'allowImageUploads' && node.data.inputs?.['allowImageUploads']) { + imgUploadSizeAndTypes.push({ + fileTypes: 'image/gif;image/jpeg;image/png;image/webp;'.split(';'), + maxUploadSize: 5 + }) + isImageUploadAllowed = true + } + }) + } + }) + + return { + isSpeechToTextEnabled, + isImageUploadAllowed, + imgUploadSizeAndTypes + } + } + /** * Method that get chat messages. 
* @param {string} chatflowid From 0a54db71c121c8c24d89f1a671df6a0296dd38ae Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 12 Feb 2024 23:56:46 +0530 Subject: [PATCH 37/62] Update how uploads config is sent --- packages/server/src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 218ae100..44c6eb34 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -406,7 +406,7 @@ export class App { try { const uploadsConfig = await this.areUploadsEnabled(req.params.id) const parsedConfig = JSON.parse(chatflow.chatbotConfig) - return res.json({ ...parsedConfig, ...uploadsConfig }) + return res.json({ ...parsedConfig, uploads: uploadsConfig }) } catch (e) { return res.status(500).send(`Error parsing Chatbot Config for Chatflow ${req.params.id}`) } From 11219c65490f289c4106337461a72fa0337a2f1d Mon Sep 17 00:00:00 2001 From: Ilango Date: Tue, 13 Feb 2024 16:23:12 +0530 Subject: [PATCH 38/62] Fix audio recording not sending when recording stops --- packages/ui/src/views/chatmessage/ChatMessage.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 89fd941a..73d1c4a6 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -353,7 +353,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog, previews, setPreviews if (!promptStarterInput && userInput.trim() === '') { const containsAudio = previews.filter((item) => item.type === 'audio').length > 0 - if (!(previews.length > 1 && containsAudio)) { + if (!(previews.length >= 1 && containsAudio)) { return } } @@ -585,7 +585,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog, previews, setPreviews useEffect(() => { // wait for audio recording to load and then send const containsAudio = previews.filter((item) => item.type === 'audio').length > 0 - if (previews.length > 1 && containsAudio) { + if (previews.length >= 1 && containsAudio) { setIsRecording(false) setRecordingNotSupported(false) handlePromptClick('') From 205670375d43123b2cddf5f6c038a06e030d28ba Mon Sep 17 00:00:00 2001 From: Ilango Date: Wed, 14 Feb 2024 14:37:12 +0530 Subject: [PATCH 39/62] Check if uploads are enabled/changed on chatflow save and update chatbot config --- packages/server/src/index.ts | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 44c6eb34..73e8d112 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -402,9 +402,9 @@ export class App { id: req.params.id }) if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) + const uploadsConfig = await this.getUploadsConfig(req.params.id) if (chatflow.chatbotConfig) { try { - const uploadsConfig = await this.areUploadsEnabled(req.params.id) const parsedConfig = JSON.parse(chatflow.chatbotConfig) return res.json({ ...parsedConfig, uploads: uploadsConfig }) } catch (e) { @@ -447,6 +447,14 @@ export class App { const updateChatFlow = new ChatFlow() Object.assign(updateChatFlow, body) + // check if image uploads or speech have been enabled and update chatbotConfig + const uploadsConfig = await this.getUploadsConfig(req.params.id) + if (uploadsConfig) { + // if there's existing chatbotConfig, merge uploadsConfig with it + // if not just add uploadsConfig to chatbotConfig + 
Object.assign(updateChatFlow, { chatbotConfig: { ...((chatflow.chatbotConfig ?? {}) as object), ...uploadsConfig } }) + } + updateChatFlow.id = chatflow.id createRateLimiter(updateChatFlow) @@ -523,7 +531,7 @@ export class App { // Check if chatflow valid for uploads this.app.get('/api/v1/chatflows-uploads/:id', async (req: Request, res: Response) => { try { - const uploadsConfig = await this.areUploadsEnabled(req.params.id) + const uploadsConfig = await this.getUploadsConfig(req.params.id) return res.json(uploadsConfig) } catch (e) { return res.status(500).send(e) @@ -1490,7 +1498,7 @@ export class App { * Method that checks if uploads are enabled in the chatflow * @param {string} chatflowid */ - async areUploadsEnabled(chatflowid: string): Promise { + async getUploadsConfig(chatflowid: string): Promise { const chatflow = await this.AppDataSource.getRepository(ChatFlow).findOneBy({ id: chatflowid }) From 56b21862a3444fbe8a52aecb24ced0c3cd0194ca Mon Sep 17 00:00:00 2001 From: Ilango Date: Wed, 14 Feb 2024 15:07:13 +0530 Subject: [PATCH 40/62] Send uploads config if available, even when chatbot config is not available --- packages/server/src/index.ts | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 73e8d112..be1e1cb8 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -403,9 +403,11 @@ export class App { }) if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) const uploadsConfig = await this.getUploadsConfig(req.params.id) - if (chatflow.chatbotConfig) { + // even if chatbotConfig is not set but uploads are enabled + // send uploadsConfig to the chatbot + if (chatflow.chatbotConfig || uploadsConfig) { try { - const parsedConfig = JSON.parse(chatflow.chatbotConfig) + const parsedConfig = chatflow.chatbotConfig ? JSON.parse(chatflow.chatbotConfig) : {} return res.json({ ...parsedConfig, uploads: uploadsConfig }) } catch (e) { return res.status(500).send(`Error parsing Chatbot Config for Chatflow ${req.params.id}`) @@ -447,14 +449,6 @@ export class App { const updateChatFlow = new ChatFlow() Object.assign(updateChatFlow, body) - // check if image uploads or speech have been enabled and update chatbotConfig - const uploadsConfig = await this.getUploadsConfig(req.params.id) - if (uploadsConfig) { - // if there's existing chatbotConfig, merge uploadsConfig with it - // if not just add uploadsConfig to chatbotConfig - Object.assign(updateChatFlow, { chatbotConfig: { ...((chatflow.chatbotConfig ?? 
{}) as object), ...uploadsConfig } }) - } - updateChatFlow.id = chatflow.id createRateLimiter(updateChatFlow) From 86da67f4674b191d4f4e749c4d915805fbfb253a Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 14 Feb 2024 20:20:43 +0800 Subject: [PATCH 41/62] add missing human text when image presents --- .../chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 9033b27f..7b39f1ed 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -45,17 +45,13 @@ export class ChatOpenAI extends LangchainChatOpenAI { const messageContent = addImagesToMessages(nodeData, optionsData, this.multiModalOption) if (messageContent?.length) { if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) { - const lastMessage = messages[0].pop() - if (lastMessage instanceof HumanMessage) { - lastMessage.content = messageContent + // Change model to gpt-4-vision + this.modelName = 'gpt-4-vision-preview' - // Change model to gpt-4-vision - this.modelName = 'gpt-4-vision-preview' + // Change default max token to higher when using gpt-4-vision + this.maxTokens = 1024 - // Change default max token to higher when using gpt-4-vision - this.maxTokens = 1024 - } - messages[0].push(lastMessage as HumanMessage) + messages[0].push(new HumanMessage({ content: messageContent })) } } else { // revert to previous values if image upload is empty From 44c1f54d05e948184229c9fc60c08739bfb3ff50 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 14 Feb 2024 13:14:46 -0500 Subject: [PATCH 42/62] Showing image/audio files in the View Messages Dialog --- .../ui-component/dialog/ViewMessagesDialog.js | 63 ++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js b/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js index cadd4abd..6e206885 100644 --- a/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js +++ b/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js @@ -21,7 +21,9 @@ import { DialogTitle, ListItem, ListItemText, - Chip + Chip, + Card, + CardMedia } from '@mui/material' import { useTheme } from '@mui/material/styles' import DatePicker from 'react-datepicker' @@ -69,6 +71,12 @@ DatePickerCustomInput.propTypes = { onClick: PropTypes.func } +const messageImageStyle = { + width: '128px', + height: '128px', + objectFit: 'cover' +} + const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { const portalElement = document.getElementById('portal') const dispatch = useDispatch() @@ -249,6 +257,14 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { }) } } + if (chatmsg.fileUploads) { + chatmsg.fileUploads = JSON.parse(chatmsg.fileUploads) + chatmsg.fileUploads.forEach((file) => { + if (file.type === 'stored-file') { + file.data = `${baseURL}/api/v1/get-upload-file?chatflowId=${chatmsg.chatflowid}&chatId=${chatmsg.chatId}&fileName=${file.name}` + } + }) + } const obj = { ...chatmsg, message: chatmsg.content, @@ -672,6 +688,51 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { {message.message}
+ {message.fileUploads && message.fileUploads.length > 0 && ( +
+ {message.fileUploads.map((item, index) => { + return ( + <> + {item.mime.startsWith('image/') ? ( + + + + ) : ( + // eslint-disable-next-line jsx-a11y/media-has-caption + + )} + + ) + })} +
+ )} {message.fileAnnotations && (
{message.fileAnnotations.map((fileAnnotation, index) => { From a71c5a109d07b108674d0196b46183c408941b4a Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 14 Feb 2024 13:16:51 -0500 Subject: [PATCH 43/62] fix for concurrent requests for media handling --- .../nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts | 2 +- .../ChatOpenAI/FlowiseChatOpenAI.ts | 27 ++++++++++++++----- packages/components/src/multiModalUtils.ts | 3 +-- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts index d94cc3a1..221d5e17 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts @@ -244,7 +244,7 @@ class ChatOpenAI_ChatModels implements INode { } } - const model = new ChatOpenAI(obj, { + const model = new ChatOpenAI(nodeData.id, obj, { baseURL: basePath, baseOptions: parsedBaseOptions }) diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 7b39f1ed..396d7433 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -9,20 +9,33 @@ import { Callbacks } from '@langchain/core/callbacks/manager' import { ICommonObject, IMultiModalOption, INodeData } from '../../../src' import { addImagesToMessages } from '../../../src/multiModalUtils' +interface MultiModalOptions { + chainNodeData: INodeData + chainNodeOptions: ICommonObject +} + export class ChatOpenAI extends LangchainChatOpenAI { //TODO: Should be class variables and not static - public static chainNodeData: INodeData - public static chainNodeOptions: ICommonObject + // public static nodeData: INodeData + // public static nodeOptions: ICommonObject + private static chainNodeDataOptions: Map = new Map() configuredModel: string configuredMaxToken?: number multiModalOption?: IMultiModalOption + id: string + + public static injectChainNodeData(nodeData: INodeData, options: ICommonObject) { + ChatOpenAI.chainNodeDataOptions.set(nodeData.id, { chainNodeData: nodeData, chainNodeOptions: options }) + } constructor( + id: string, fields?: Partial & BaseChatModelParams & { openAIApiKey?: string; multiModalOption?: IMultiModalOption }, /** @deprecated */ configuration?: ClientOptions & LegacyOpenAIInput ) { super(fields, configuration) + this.id = id this.multiModalOption = fields?.multiModalOption this.configuredModel = fields?.modelName ?? 
'gpt-3.5-turbo' this.configuredMaxToken = fields?.maxTokens @@ -33,15 +46,15 @@ export class ChatOpenAI extends LangchainChatOpenAI { } async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise { - if (ChatOpenAI.chainNodeData && ChatOpenAI.chainNodeOptions) { - await this.injectMultiModalMessages(messages) + if (ChatOpenAI.chainNodeDataOptions.has(this.id)) { + await this.injectMultiModalMessages(messages, ChatOpenAI.chainNodeDataOptions.get(this.id) as MultiModalOptions) } return super.generate(messages, options, callbacks) } - private async injectMultiModalMessages(messages: BaseMessageLike[][]) { - const nodeData = ChatOpenAI.chainNodeData - const optionsData = ChatOpenAI.chainNodeOptions + private async injectMultiModalMessages(messages: BaseMessageLike[][], nodeOptions: MultiModalOptions) { + const nodeData = nodeOptions.chainNodeData + const optionsData = nodeOptions.chainNodeOptions const messageContent = addImagesToMessages(nodeData, optionsData, this.multiModalOption) if (messageContent?.length) { if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) { diff --git a/packages/components/src/multiModalUtils.ts b/packages/components/src/multiModalUtils.ts index 6cbd75a0..c321f088 100644 --- a/packages/components/src/multiModalUtils.ts +++ b/packages/components/src/multiModalUtils.ts @@ -12,8 +12,7 @@ export const injectChainNodeData = (nodeData: INodeData, options: ICommonObject) if (model instanceof ChatOpenAI) { // TODO: this should not be static, need to figure out how to pass the nodeData and options to the invoke method - ChatOpenAI.chainNodeOptions = options - ChatOpenAI.chainNodeData = nodeData + ChatOpenAI.injectChainNodeData(nodeData, options) } } From 85809a9ecc82486a90a1f4cc39515dd28b796c04 Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 15 Feb 2024 04:03:10 +0800 Subject: [PATCH 44/62] fix for concurrency --- .../ConversationChain/ConversationChain.ts | 49 +++++++++++++------ packages/components/src/Interface.ts | 17 +++++++ packages/components/src/multiModalUtils.ts | 11 +++-- 3 files changed, 59 insertions(+), 18 deletions(-) diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index ad49d829..22a32c7b 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -2,14 +2,15 @@ import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../ import { ConversationChain } from 'langchain/chains' import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts' -import { BaseChatModel } from 'langchain/chat_models/base' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { RunnableSequence } from 'langchain/schema/runnable' import { StringOutputParser } from 'langchain/schema/output_parser' +import { HumanMessage } from 'langchain/schema' import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' -import { injectChainNodeData } from 
'../../../src/multiModalUtils' +import { addImagesToMessages } from '../../../src/multiModalUtils' +import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' @@ -93,7 +94,7 @@ class ConversationChain_Chains implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory - injectChainNodeData(nodeData, options) + // injectChainNodeData(nodeData, options) const chain = prepareChain(nodeData, options, this.sessionId) const moderations = nodeData.inputs?.inputModeration as Moderation[] @@ -145,7 +146,7 @@ class ConversationChain_Chains implements INode { } } -const prepareChatPrompt = (nodeData: INodeData) => { +const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => { const memory = nodeData.inputs?.memory as FlowiseMemory const prompt = nodeData.inputs?.systemMessagePrompt as string const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate @@ -153,12 +154,10 @@ const prepareChatPrompt = (nodeData: INodeData) => { if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) { const sysPrompt = chatPromptTemplate.promptMessages[0] const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1] - const chatPrompt = ChatPromptTemplate.fromMessages([ - sysPrompt, - new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), - humanPrompt - ]) + const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt] + if (humanImageMessages.length) messages.push(...humanImageMessages) + const chatPrompt = ChatPromptTemplate.fromMessages(messages) if ((chatPromptTemplate as any).promptValues) { // @ts-ignore chatPrompt.promptValues = (chatPromptTemplate as any).promptValues @@ -167,22 +166,44 @@ const prepareChatPrompt = (nodeData: INodeData) => { return chatPrompt } - const chatPrompt = ChatPromptTemplate.fromMessages([ + const messages = [ SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage), new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`) - ]) + ] + if (humanImageMessages.length) messages.push(...(humanImageMessages as any[])) + + const chatPrompt = ChatPromptTemplate.fromMessages(messages) return chatPrompt } const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => { const chatHistory = options.chatHistory - const model = nodeData.inputs?.model as BaseChatModel + let model = nodeData.inputs?.model as ChatOpenAI const memory = nodeData.inputs?.memory as FlowiseMemory const memoryKey = memory.memoryKey ?? 
'chat_history' - const chatPrompt = prepareChatPrompt(nodeData) + const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) + let humanImageMessages: HumanMessage[] = [] + + if (messageContent?.length) { + // Change model to gpt-4-vision + model.modelName = 'gpt-4-vision-preview' + + // Change default max token to higher when using gpt-4-vision + model.maxTokens = 1024 + + for (const msg of messageContent) { + humanImageMessages.push(new HumanMessage({ content: [msg] })) + } + } else { + // revert to previous values if image upload is empty + model.modelName = model.configuredModel + model.maxTokens = model.configuredMaxToken + } + + const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages) let promptVariables = {} const promptValuesRaw = (chatPrompt as any).promptValues if (promptValuesRaw) { @@ -206,7 +227,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s }, ...promptVariables }, - prepareChatPrompt(nodeData), + prepareChatPrompt(nodeData, humanImageMessages), model, new StringOutputParser() ]) diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index 44818733..62cd3ba9 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -21,6 +21,8 @@ export type CommonType = string | number | boolean | undefined | null export type MessageType = 'apiMessage' | 'userMessage' +export type ImageDetail = 'auto' | 'low' | 'high' + /** * Others */ @@ -158,6 +160,21 @@ export interface IMultiModalOption { audio?: Record } +export type MessageContentText = { + type: 'text' + text: string +} + +export type MessageContentImageUrl = { + type: 'image_url' + image_url: + | string + | { + url: string + detail?: ImageDetail + } +} + /** * Classes */ diff --git a/packages/components/src/multiModalUtils.ts b/packages/components/src/multiModalUtils.ts index c321f088..246821db 100644 --- a/packages/components/src/multiModalUtils.ts +++ b/packages/components/src/multiModalUtils.ts @@ -1,10 +1,9 @@ -import { ICommonObject, IFileUpload, IMultiModalOption, INodeData } from './Interface' +import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface' import { BaseChatModel } from 'langchain/chat_models/base' import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai' import path from 'path' import { getStoragePath } from './utils' import fs from 'fs' -import { MessageContent } from '@langchain/core/dist/messages' import { ChatOpenAI } from '../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI' export const injectChainNodeData = (nodeData: INodeData, options: ICommonObject) => { @@ -16,8 +15,12 @@ export const injectChainNodeData = (nodeData: INodeData, options: ICommonObject) } } -export const addImagesToMessages = (nodeData: INodeData, options: ICommonObject, multiModalOption?: IMultiModalOption): MessageContent => { - const imageContent: MessageContent = [] +export const addImagesToMessages = ( + nodeData: INodeData, + options: ICommonObject, + multiModalOption?: IMultiModalOption +): MessageContentImageUrl[] => { + const imageContent: MessageContentImageUrl[] = [] let model = nodeData.inputs?.model if (model instanceof LangchainChatOpenAI && multiModalOption) { From 6acc921095b0b0cdb261c5501e25561d2fb4a770 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 14 Feb 2024 17:04:53 -0500 Subject: [PATCH 45/62] ViewMessages->Export Messages. Add Fullpath of the image/audio file. 
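Message export now includes, for every stored upload, the file's location on
the server's storage volume. The UI fetches the storage root once from the new
GET /api/v1/get-upload-path endpoint and joins it with the chatflow id, chat id
and file name, choosing the path separator from the browser's OS via the new
getOS() helper. A sketch of the resulting export entry (field names as in this
patch; the values below are illustrative):

    const exportedMessage = {
        content: 'What is in this image?',
        role: 'user', // chatmsg.role === 'apiMessage' maps to 'bot'
        time: '2024-02-14T13:14:46.000Z',
        // storagePath + chatflowid + chatId + file name, joined with the
        // OS-dependent separator
        filePaths: ['/home/user/.flowise/storage/<chatflowid>/<chatId>/image.png']
    }

Caveat: getOS() reads the exporting browser's user agent, so if the UI and the
server run on different platforms the separator may not match the server's
filesystem.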
--- packages/server/src/index.ts | 6 +++++ packages/ui/src/api/chatmessage.js | 4 ++- .../ui-component/dialog/ViewMessagesDialog.js | 27 +++++++++++++++++-- packages/ui/src/utils/genericHelper.js | 22 +++++++++++++++ 4 files changed, 56 insertions(+), 3 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 851da8c8..4ad79d97 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1150,6 +1150,12 @@ export class App { } }) + this.app.get('/api/v1/get-upload-path', async (req: Request, res: Response) => { + return res.json({ + storagePath: getStoragePath() + }) + }) + // stream uploaded image this.app.get('/api/v1/get-upload-file', async (req: Request, res: Response) => { if (!req.query.chatflowId || !req.query.chatId || !req.query.fileName) { diff --git a/packages/ui/src/api/chatmessage.js b/packages/ui/src/api/chatmessage.js index 5f1a4bad..f1651247 100644 --- a/packages/ui/src/api/chatmessage.js +++ b/packages/ui/src/api/chatmessage.js @@ -4,10 +4,12 @@ const getInternalChatmessageFromChatflow = (id) => client.get(`/internal-chatmes const getAllChatmessageFromChatflow = (id, params = {}) => client.get(`/chatmessage/${id}`, { params: { order: 'DESC', ...params } }) const getChatmessageFromPK = (id, params = {}) => client.get(`/chatmessage/${id}`, { params: { order: 'ASC', ...params } }) const deleteChatmessage = (id, params = {}) => client.delete(`/chatmessage/${id}`, { params: { ...params } }) +const getStoragePath = () => client.get(`/get-upload-path`) export default { getInternalChatmessageFromChatflow, getAllChatmessageFromChatflow, getChatmessageFromPK, - deleteChatmessage + deleteChatmessage, + getStoragePath } diff --git a/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js b/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js index 6e206885..2df501fb 100644 --- a/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js +++ b/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js @@ -49,7 +49,7 @@ import useApi from 'hooks/useApi' import useConfirm from 'hooks/useConfirm' // Utils -import { isValidURL, removeDuplicateURL } from 'utils/genericHelper' +import { getOS, isValidURL, removeDuplicateURL } from 'utils/genericHelper' import useNotifier from 'utils/useNotifier' import { baseURL } from 'store/constant' @@ -100,6 +100,8 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { const getChatmessageApi = useApi(chatmessageApi.getAllChatmessageFromChatflow) const getChatmessageFromPKApi = useApi(chatmessageApi.getChatmessageFromPK) + const getStoragePathFromServer = useApi(chatmessageApi.getStoragePath) + let storagePath = '' const onStartDateSelected = (date) => { setStartDate(date) @@ -128,16 +130,35 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { }) } - const exportMessages = () => { + const exportMessages = async () => { + if (!storagePath && getStoragePathFromServer.data) { + storagePath = getStoragePathFromServer.data.storagePath + } const obj = {} + let fileSeparator = '/' + if ('windows' === getOS()) { + fileSeparator = '\\' + } for (let i = 0; i < allChatlogs.length; i += 1) { const chatmsg = allChatlogs[i] const chatPK = getChatPK(chatmsg) + let filePaths = [] + if (chatmsg.fileUploads) { + chatmsg.fileUploads = JSON.parse(chatmsg.fileUploads) + chatmsg.fileUploads.forEach((file) => { + if (file.type === 'stored-file') { + filePaths.push( + `${storagePath}${fileSeparator}${chatmsg.chatflowid}${fileSeparator}${chatmsg.chatId}${fileSeparator}${file.name}` + ) + } 
+ }) + } const msg = { content: chatmsg.content, role: chatmsg.role === 'apiMessage' ? 'bot' : 'user', time: chatmsg.createdDate } + if (filePaths.length) msg.filePaths = filePaths if (chatmsg.sourceDocuments) msg.sourceDocuments = JSON.parse(chatmsg.sourceDocuments) if (chatmsg.usedTools) msg.usedTools = JSON.parse(chatmsg.usedTools) if (chatmsg.fileAnnotations) msg.fileAnnotations = JSON.parse(chatmsg.fileAnnotations) @@ -373,6 +394,8 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { useEffect(() => { if (getChatmessageApi.data) { + getStoragePathFromServer.request() + setAllChatLogs(getChatmessageApi.data) const chatPK = processChatLogs(getChatmessageApi.data) setSelectedMessageIndex(0) diff --git a/packages/ui/src/utils/genericHelper.js b/packages/ui/src/utils/genericHelper.js index 74dc9578..645435d2 100644 --- a/packages/ui/src/utils/genericHelper.js +++ b/packages/ui/src/utils/genericHelper.js @@ -607,3 +607,25 @@ export const getConfigExamplesForCurl = (configData, bodyType, isMultiple, stopN } return finalStr } + +export const getOS = () => { + let userAgent = window.navigator.userAgent.toLowerCase(), + macosPlatforms = /(macintosh|macintel|macppc|mac68k|macos)/i, + windowsPlatforms = /(win32|win64|windows|wince)/i, + iosPlatforms = /(iphone|ipad|ipod)/i, + os = null + + if (macosPlatforms.test(userAgent)) { + os = 'macos' + } else if (iosPlatforms.test(userAgent)) { + os = 'ios' + } else if (windowsPlatforms.test(userAgent)) { + os = 'windows' + } else if (/android/.test(userAgent)) { + os = 'android' + } else if (!os && /linux/.test(userAgent)) { + os = 'linux' + } + + return os +} From 9c874bb49aa57617ebdb41ae748a0638e1530412 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 15 Feb 2024 09:08:49 -0500 Subject: [PATCH 46/62] Concurrency fixes - correcting wrong id --- .../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 396d7433..3943cfe3 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -25,7 +25,9 @@ export class ChatOpenAI extends LangchainChatOpenAI { id: string public static injectChainNodeData(nodeData: INodeData, options: ICommonObject) { - ChatOpenAI.chainNodeDataOptions.set(nodeData.id, { chainNodeData: nodeData, chainNodeOptions: options }) + if (nodeData.inputs?.model.id) { + ChatOpenAI.chainNodeDataOptions.set(nodeData.inputs?.model.id, { chainNodeData: nodeData, chainNodeOptions: options }) + } } constructor( From 52ffa1772bf29264fe1712ec95a846d3df307cfc Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 15 Feb 2024 18:18:36 -0500 Subject: [PATCH 47/62] Multimodal Fixes...removing all static methods/variables. 
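Request-scoped upload data no longer sits in static fields on the ChatOpenAI
wrapper, where concurrent predictions could overwrite one another. Each chain
or agent node now tags the exact model instance it is about to run, via
lc_kwargs.chainData. A rough sketch of the pattern introduced here (all names
from this patch):

    // In a chain node's run():
    const chain = prepareChain(nodeData, options, this.sessionId)
    // Walks the sequence's steps and tags every ChatOpenAI it finds,
    // whether it appears directly or wrapped inside a RunnableBinding:
    injectRunnableNodeData(chain, nodeData, options)

    // Inside the model, generate() reads the instance-local copy:
    if (this.lc_kwargs.chainData) {
        await this.injectMultiModalMessages(messages, this.lc_kwargs.chainData)
    }

Agent executors get the same treatment through injectAgentExecutorNodeData
(RunnableAgent wrapping a RunnableSequence) and injectLcAgentExecutorNodeData
(legacy ChatAgent, tagging its LLMChain's llm).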
--- .../ConversationalAgent.ts | 4 +- .../agents/MRKLAgentChat/MRKLAgentChat.ts | 6 +-- .../ConversationChain/ConversationChain.ts | 50 ++++++------------- .../nodes/chains/LLMChain/LLMChain.ts | 6 +-- .../ChatOpenAI/FlowiseChatOpenAI.ts | 27 +++------- packages/components/src/multiModalUtils.ts | 47 ++++++++++++++--- 6 files changed, 70 insertions(+), 70 deletions(-) diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index c26063cf..e9bdd94b 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -9,7 +9,7 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } import { AgentExecutor } from '../../../src/agents' import { ChatConversationalAgent } from 'langchain/agents' import { renderTemplate } from '@langchain/core/prompts' -import { injectChainNodeData } from '../../../src/multiModalUtils' +import { injectAgentExecutorNodeData } from '../../../src/multiModalUtils' const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. @@ -85,9 +85,9 @@ class ConversationalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory - injectChainNodeData(nodeData, options) const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) + injectAgentExecutorNodeData(executor, nodeData, options) const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts index f3e9db3a..c14c9341 100644 --- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts @@ -5,7 +5,7 @@ import { Tool } from 'langchain/tools' import { BaseLanguageModel } from 'langchain/base_language' import { flatten } from 'lodash' import { additionalCallbacks } from '../../../src/handler' -import { injectChainNodeData } from '../../../src/multiModalUtils' +import { injectLcAgentExecutorNodeData } from '../../../src/multiModalUtils' class MRKLAgentChat_Agents implements INode { label: string @@ -48,14 +48,14 @@ class MRKLAgentChat_Agents implements INode { tools = flatten(tools) const executor = await initializeAgentExecutorWithOptions(tools, model, { agentType: 'chat-zero-shot-react-description', - verbose: process.env.DEBUG === 'true' ? 
true : false + verbose: process.env.DEBUG === 'true' }) return executor } async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const executor = nodeData.instance as AgentExecutor - injectChainNodeData(nodeData, options) + injectLcAgentExecutorNodeData(executor, nodeData, options) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index 22a32c7b..fce36b89 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -2,15 +2,14 @@ import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../ import { ConversationChain } from 'langchain/chains' import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts' +import { BaseChatModel } from 'langchain/chat_models/base' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { RunnableSequence } from 'langchain/schema/runnable' import { StringOutputParser } from 'langchain/schema/output_parser' -import { HumanMessage } from 'langchain/schema' import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' -import { addImagesToMessages } from '../../../src/multiModalUtils' -import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' +import { injectRunnableNodeData } from '../../../src/multiModalUtils' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' @@ -94,9 +93,10 @@ class ConversationChain_Chains implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory - // injectChainNodeData(nodeData, options) const chain = prepareChain(nodeData, options, this.sessionId) + injectRunnableNodeData(chain, nodeData, options) + const moderations = nodeData.inputs?.inputModeration as Moderation[] if (moderations && moderations.length > 0) { @@ -146,7 +146,7 @@ class ConversationChain_Chains implements INode { } } -const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => { +const prepareChatPrompt = (nodeData: INodeData) => { const memory = nodeData.inputs?.memory as FlowiseMemory const prompt = nodeData.inputs?.systemMessagePrompt as string const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate @@ -154,10 +154,12 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) { const sysPrompt = chatPromptTemplate.promptMessages[0] const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1] - const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 
'chat_history'), humanPrompt] - if (humanImageMessages.length) messages.push(...humanImageMessages) + const chatPrompt = ChatPromptTemplate.fromMessages([ + sysPrompt, + new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), + humanPrompt + ]) - const chatPrompt = ChatPromptTemplate.fromMessages(messages) if ((chatPromptTemplate as any).promptValues) { // @ts-ignore chatPrompt.promptValues = (chatPromptTemplate as any).promptValues @@ -166,44 +168,22 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage return chatPrompt } - const messages = [ + const chatPrompt = ChatPromptTemplate.fromMessages([ SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage), new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`) - ] - if (humanImageMessages.length) messages.push(...(humanImageMessages as any[])) - - const chatPrompt = ChatPromptTemplate.fromMessages(messages) + ]) return chatPrompt } const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => { const chatHistory = options.chatHistory - let model = nodeData.inputs?.model as ChatOpenAI + const model = nodeData.inputs?.model as BaseChatModel const memory = nodeData.inputs?.memory as FlowiseMemory const memoryKey = memory.memoryKey ?? 'chat_history' - const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) - let humanImageMessages: HumanMessage[] = [] - - if (messageContent?.length) { - // Change model to gpt-4-vision - model.modelName = 'gpt-4-vision-preview' - - // Change default max token to higher when using gpt-4-vision - model.maxTokens = 1024 - - for (const msg of messageContent) { - humanImageMessages.push(new HumanMessage({ content: [msg] })) - } - } else { - // revert to previous values if image upload is empty - model.modelName = model.configuredModel - model.maxTokens = model.configuredMaxToken - } - - const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages) + const chatPrompt = prepareChatPrompt(nodeData) let promptVariables = {} const promptValuesRaw = (chatPrompt as any).promptValues if (promptValuesRaw) { @@ -227,7 +207,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s }, ...promptVariables }, - prepareChatPrompt(nodeData, humanImageMessages), + prepareChatPrompt(nodeData), model, new StringOutputParser() ]) diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts index 18048283..1bc2f338 100644 --- a/packages/components/nodes/chains/LLMChain/LLMChain.ts +++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts @@ -8,7 +8,7 @@ import { formatResponse, injectOutputParser } from '../../outputparsers/OutputPa import { BaseLLMOutputParser } from 'langchain/schema/output_parser' import { OutputFixingParser } from 'langchain/output_parsers' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' -import { injectChainNodeData } from '../../../src/multiModalUtils' +import { injectLLMChainNodeData } from '../../../src/multiModalUtils' class LLMChain_Chains implements INode { label: string @@ -108,7 +108,7 @@ class LLMChain_Chains implements INode { verbose: process.env.DEBUG === 'true' }) const inputVariables = chain.prompt.inputVariables as string[] // ["product"] - injectChainNodeData(nodeData, options) + injectLLMChainNodeData(nodeData, options) promptValues = injectOutputParser(this.outputParser, chain, promptValues) const 
res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData) // eslint-disable-next-line no-console @@ -138,7 +138,7 @@ class LLMChain_Chains implements INode { if (!this.outputParser && outputParser) { this.outputParser = outputParser } - injectChainNodeData(nodeData, options) + injectLLMChainNodeData(nodeData, options) promptValues = injectOutputParser(this.outputParser, chain, promptValues) const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData) // eslint-disable-next-line no-console diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 3943cfe3..3884ae7d 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -6,30 +6,15 @@ import { BaseLanguageModelInput } from 'langchain/base_language' import { ChatOpenAICallOptions } from '@langchain/openai/dist/chat_models' import { BaseMessageChunk, BaseMessageLike, HumanMessage, LLMResult } from 'langchain/schema' import { Callbacks } from '@langchain/core/callbacks/manager' -import { ICommonObject, IMultiModalOption, INodeData } from '../../../src' -import { addImagesToMessages } from '../../../src/multiModalUtils' - -interface MultiModalOptions { - chainNodeData: INodeData - chainNodeOptions: ICommonObject -} +import { IMultiModalOption } from '../../../src' +import { addImagesToMessages, MultiModalOptions } from '../../../src/multiModalUtils' export class ChatOpenAI extends LangchainChatOpenAI { - //TODO: Should be class variables and not static - // public static nodeData: INodeData - // public static nodeOptions: ICommonObject - private static chainNodeDataOptions: Map = new Map() configuredModel: string configuredMaxToken?: number multiModalOption?: IMultiModalOption id: string - public static injectChainNodeData(nodeData: INodeData, options: ICommonObject) { - if (nodeData.inputs?.model.id) { - ChatOpenAI.chainNodeDataOptions.set(nodeData.inputs?.model.id, { chainNodeData: nodeData, chainNodeOptions: options }) - } - } - constructor( id: string, fields?: Partial & BaseChatModelParams & { openAIApiKey?: string; multiModalOption?: IMultiModalOption }, @@ -48,15 +33,15 @@ export class ChatOpenAI extends LangchainChatOpenAI { } async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise { - if (ChatOpenAI.chainNodeDataOptions.has(this.id)) { - await this.injectMultiModalMessages(messages, ChatOpenAI.chainNodeDataOptions.get(this.id) as MultiModalOptions) + if (this.lc_kwargs.chainData) { + await this.injectMultiModalMessages(messages, this.lc_kwargs.chainData) } return super.generate(messages, options, callbacks) } private async injectMultiModalMessages(messages: BaseMessageLike[][], nodeOptions: MultiModalOptions) { - const nodeData = nodeOptions.chainNodeData - const optionsData = nodeOptions.chainNodeOptions + const nodeData = nodeOptions.nodeData + const optionsData = nodeOptions.nodeOptions const messageContent = addImagesToMessages(nodeData, optionsData, this.multiModalOption) if (messageContent?.length) { if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) { diff --git a/packages/components/src/multiModalUtils.ts b/packages/components/src/multiModalUtils.ts index 246821db..81ae1999 100644 --- a/packages/components/src/multiModalUtils.ts +++ 
b/packages/components/src/multiModalUtils.ts @@ -1,20 +1,55 @@ import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface' -import { BaseChatModel } from 'langchain/chat_models/base' import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai' import path from 'path' import { getStoragePath } from './utils' import fs from 'fs' import { ChatOpenAI } from '../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI' +import { LLMChain } from 'langchain/chains' +import { RunnableBinding, RunnableSequence } from 'langchain/schema/runnable' +import { AgentExecutor as LcAgentExecutor, ChatAgent, RunnableAgent } from 'langchain/agents' +import { AgentExecutor } from './agents' -export const injectChainNodeData = (nodeData: INodeData, options: ICommonObject) => { - let model = nodeData.inputs?.model as BaseChatModel +export interface MultiModalOptions { + nodeData: INodeData + nodeOptions: ICommonObject +} - if (model instanceof ChatOpenAI) { - // TODO: this should not be static, need to figure out how to pass the nodeData and options to the invoke method - ChatOpenAI.injectChainNodeData(nodeData, options) +export const injectLLMChainNodeData = (nodeData: INodeData, options: ICommonObject) => { + let llmChain = nodeData.instance as LLMChain + ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options } +} + +export const injectAgentExecutorNodeData = (agentExecutor: AgentExecutor, nodeData: INodeData, options: ICommonObject) => { + if (agentExecutor.agent instanceof RunnableAgent && agentExecutor.agent.runnable instanceof RunnableSequence) { + let rs = agentExecutor.agent.runnable as RunnableSequence + injectRunnableNodeData(rs, nodeData, options) } } +export const injectLcAgentExecutorNodeData = (agentExecutor: LcAgentExecutor, nodeData: INodeData, options: ICommonObject) => { + if (agentExecutor.agent instanceof ChatAgent) { + let llmChain = agentExecutor.agent.llmChain as LLMChain + ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options } + } +} + +export const injectRunnableNodeData = (runnableSequence: RunnableSequence, nodeData: INodeData, options: ICommonObject) => { + runnableSequence.steps.forEach((step) => { + if (step instanceof ChatOpenAI) { + ;(step as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options } + } + + if (step instanceof RunnableBinding) { + if ((step as RunnableBinding).bound instanceof ChatOpenAI) { + ;((step as RunnableBinding).bound as ChatOpenAI).lc_kwargs.chainData = { + nodeData: nodeData, + nodeOptions: options + } + } + } + }) +} + export const addImagesToMessages = ( nodeData: INodeData, options: ICommonObject, From 10fc1bf08d20d41db20b566961dad19e1af79db9 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Fri, 16 Feb 2024 08:18:58 -0500 Subject: [PATCH 48/62] Multimodal Fixes for cyclic (circular) dependencies during langsmith analysis... 
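lc_kwargs is serialized by tracing callbacks (LangSmith among them), and the
nodeData/options objects attached in the previous commit hold back-references
into the flow graph, so that serialization walked into a cycle. chainData is
therefore reduced to the plain, JSON-safe subset that multimodal handling
actually needs. Roughly the failure this avoids:

    // Illustrative only: a self-referencing structure of the kind nodeData
    // carries cannot survive a naive serialization pass.
    const node: any = { name: 'chatOpenAI' }
    node.parent = node
    JSON.stringify(node) // TypeError: Converting circular structure to JSON

getUploadsFromOptions() copies only uploads, chatflowid and chatId, and
addImagesToMessages() now takes the options object alone instead of the full
INodeData.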
--- .../ChatOpenAI/FlowiseChatOpenAI.ts | 7 +- packages/components/src/multiModalUtils.ts | 64 +++++++++---------- 2 files changed, 35 insertions(+), 36 deletions(-) diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 3884ae7d..acfc064c 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -39,10 +39,9 @@ export class ChatOpenAI extends LangchainChatOpenAI { return super.generate(messages, options, callbacks) } - private async injectMultiModalMessages(messages: BaseMessageLike[][], nodeOptions: MultiModalOptions) { - const nodeData = nodeOptions.nodeData - const optionsData = nodeOptions.nodeOptions - const messageContent = addImagesToMessages(nodeData, optionsData, this.multiModalOption) + private async injectMultiModalMessages(messages: BaseMessageLike[][], options: MultiModalOptions) { + const optionsData = options.nodeOptions + const messageContent = addImagesToMessages(optionsData, this.multiModalOption) if (messageContent?.length) { if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) { // Change model to gpt-4-vision diff --git a/packages/components/src/multiModalUtils.ts b/packages/components/src/multiModalUtils.ts index 81ae1999..a7262324 100644 --- a/packages/components/src/multiModalUtils.ts +++ b/packages/components/src/multiModalUtils.ts @@ -1,5 +1,4 @@ import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface' -import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai' import path from 'path' import { getStoragePath } from './utils' import fs from 'fs' @@ -10,13 +9,12 @@ import { AgentExecutor as LcAgentExecutor, ChatAgent, RunnableAgent } from 'lang import { AgentExecutor } from './agents' export interface MultiModalOptions { - nodeData: INodeData nodeOptions: ICommonObject } export const injectLLMChainNodeData = (nodeData: INodeData, options: ICommonObject) => { let llmChain = nodeData.instance as LLMChain - ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options } + ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) } } export const injectAgentExecutorNodeData = (agentExecutor: AgentExecutor, nodeData: INodeData, options: ICommonObject) => { @@ -29,56 +27,58 @@ export const injectAgentExecutorNodeData = (agentExecutor: AgentExecutor, nodeDa export const injectLcAgentExecutorNodeData = (agentExecutor: LcAgentExecutor, nodeData: INodeData, options: ICommonObject) => { if (agentExecutor.agent instanceof ChatAgent) { let llmChain = agentExecutor.agent.llmChain as LLMChain - ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options } + ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) } } } export const injectRunnableNodeData = (runnableSequence: RunnableSequence, nodeData: INodeData, options: ICommonObject) => { runnableSequence.steps.forEach((step) => { if (step instanceof ChatOpenAI) { - ;(step as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options } + ;(step as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) } } if (step instanceof RunnableBinding) { if ((step as RunnableBinding).bound instanceof ChatOpenAI) { ;((step as 
RunnableBinding).bound as ChatOpenAI).lc_kwargs.chainData = { - nodeData: nodeData, - nodeOptions: options + nodeOptions: getUploadsFromOptions(options) } } } }) } -export const addImagesToMessages = ( - nodeData: INodeData, - options: ICommonObject, - multiModalOption?: IMultiModalOption -): MessageContentImageUrl[] => { +const getUploadsFromOptions = (options: ICommonObject): ICommonObject => { + if (options?.uploads) { + return { + uploads: options.uploads, + chatflowid: options.chatflowid, + chatId: options.chatId + } + } + return {} +} + +export const addImagesToMessages = (options: ICommonObject, multiModalOption?: IMultiModalOption): MessageContentImageUrl[] => { const imageContent: MessageContentImageUrl[] = [] - let model = nodeData.inputs?.model - if (model instanceof LangchainChatOpenAI && multiModalOption) { - // Image Uploaded - if (multiModalOption.image && multiModalOption.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) { - const imageUploads = getImageUploads(options.uploads) - for (const upload of imageUploads) { - let bf = upload.data - if (upload.type == 'stored-file') { - const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name) + // Image Uploaded + if (multiModalOption?.image && multiModalOption?.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) { + const imageUploads = getImageUploads(options.uploads) + for (const upload of imageUploads) { + if (upload.type == 'stored-file') { + const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name) - // as the image is stored in the server, read the file and convert it to base64 - const contents = fs.readFileSync(filePath) - bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64') + // as the image is stored in the server, read the file and convert it to base64 + const contents = fs.readFileSync(filePath) + let bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64') - imageContent.push({ - type: 'image_url', - image_url: { - url: bf, - detail: multiModalOption.image.imageResolution ?? 'low' - } - }) - } + imageContent.push({ + type: 'image_url', + image_url: { + url: bf, + detail: multiModalOption?.image.imageResolution ?? 
'low' + } + }) } } } From 81c07dc8c1cc770b954e1278c127524cb9ff0561 Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 19 Feb 2024 11:49:01 +0530 Subject: [PATCH 49/62] Update UI of speech to text dialog --- .../ui-component/dialog/SpeechToTextDialog.js | 291 ++++++++---------- .../ui-component/dropdown/AsyncDropdown.js | 6 +- .../views/canvas/CredentialInputHandler.js | 6 +- 3 files changed, 145 insertions(+), 158 deletions(-) diff --git a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js index 9fc11a72..60634664 100644 --- a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js +++ b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js @@ -13,14 +13,13 @@ import { DialogContent, DialogTitle, DialogActions, - Accordion, - AccordionSummary, - AccordionDetails, + FormControl, ListItem, ListItemAvatar, - ListItemText + ListItemText, + MenuItem, + Select } from '@mui/material' -import ExpandMoreIcon from '@mui/icons-material/ExpandMore' import { IconX } from '@tabler/icons' // Project import @@ -40,8 +39,8 @@ import useNotifier from 'utils/useNotifier' // API import chatflowsApi from 'api/chatflows' -const speechToTextProviders = [ - { +const speechToTextProviders = { + openAIWhisper: { label: 'OpenAI Whisper', name: 'openAIWhisper', icon: openAISVG, @@ -77,16 +76,10 @@ const speechToTextProviders = [ step: 0.1, description: `The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.`, optional: true - }, - { - label: 'On/Off', - name: 'status', - type: 'boolean', - optional: true } ] }, - { + assemblyAiTranscribe: { label: 'Assembly AI', name: 'assemblyAiTranscribe', icon: assemblyAIPng, @@ -97,16 +90,10 @@ const speechToTextProviders = [ name: 'credential', type: 'credential', credentialNames: ['assemblyAIApi'] - }, - { - label: 'On/Off', - name: 'status', - type: 'boolean', - optional: true } ] } -] +} const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { const portalElement = document.getElementById('portal') @@ -118,7 +105,7 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) const [speechToText, setSpeechToText] = useState({}) - const [providerExpanded, setProviderExpanded] = useState({}) + const [selectedProvider, setSelectedProvider] = useState('openAIWhisper') const onSave = async () => { try { @@ -169,8 +156,9 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { newVal[providerName][inputParamName] = value if (inputParamName === 'status' && value === true) { - //ensure that the others are turned off - speechToTextProviders.forEach((provider) => { + // ensure that the others are turned off + Object.keys(speechToTextProviders).forEach((key) => { + const provider = speechToTextProviders[key] if (provider.name !== providerName) { newVal[provider.name] = { ...speechToText[provider.name], status: false } } @@ -179,10 +167,9 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { setSpeechToText(newVal) } - const handleAccordionChange = (providerName) => (event, isExpanded) => { - const accordionProviders = { ...providerExpanded } - accordionProviders[providerName] = isExpanded - setProviderExpanded(accordionProviders) + const handleProviderChange = (event) => { + setSelectedProvider(event.target.value) + setValue(true, event.target.value, 'status') } useEffect(() => { @@ 
-197,7 +184,6 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { return () => { setSpeechToText({}) - setProviderExpanded({}) } }, [dialogProps]) @@ -220,136 +206,129 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { Speech To Text Configuration - {speechToTextProviders.map((provider, index) => ( - - } aria-controls={provider.name} id={provider.name}> - - -
-                        [elided markup: provider 'AI' avatar icon and {provider.url} link]
+                        [elided markup: 'Speech To Text Providers' label, provider Select dropdown, and selected provider's 'AI' avatar icon]
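A minimal standalone sketch of the rule the reworked setValue above enforces: at most one provider may be active, and enabling one switches every other entry off before the object is persisted to chatflow.speechToText. The Record shape mirrors the keyed provider map this patch introduces; any field beyond `status` is illustrative:

    // Sketch only: iterates the saved config rather than the provider registry,
    // but enforces the same single-active-provider rule as setValue above.
    type ProviderConfig = { status?: boolean; credentialId?: string; [param: string]: unknown }
    type SpeechToTextConfig = Record<string, ProviderConfig>

    function setProviderValue(
        config: SpeechToTextConfig,
        providerName: string,
        paramName: string,
        value: unknown
    ): SpeechToTextConfig {
        const next: SpeechToTextConfig = {
            ...config,
            [providerName]: { ...config[providerName], [paramName]: value }
        }
        if (paramName === 'status' && value === true) {
            // ensure that the others are turned off
            for (const name of Object.keys(next)) {
                if (name !== providerName) next[name] = { ...next[name], status: false }
            }
        }
        return next
    }

    // e.g. enabling Whisper turns AssemblyAI off:
    // setProviderValue({ assemblyAiTranscribe: { status: true } }, 'openAIWhisper', 'status', true)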
+ + {speechToTextProviders[selectedProvider].url} + + } + /> + {speechToText[selectedProvider] && speechToText[selectedProvider].status && ( +
+                        [elided markup: 'ON' status chip]
+ )} + + {speechToTextProviders[selectedProvider].inputs.map((inputParam, index) => ( + +
+ + {inputParam.label} + {!inputParam.optional &&  *} + {inputParam.description && ( + + )} + +
+ {inputParam.type === 'credential' && ( + setValue(newValue, selectedProvider, 'credentialId')} + /> + )} + {inputParam.type === 'boolean' && ( + setValue(newValue, selectedProvider, inputParam.name)} + value={ + speechToText[selectedProvider] + ? speechToText[selectedProvider][inputParam.name] + : inputParam.default ?? false } /> - {speechToText[provider.name] && speechToText[provider.name].status && ( -
-                        [elided markup: 'ON' status chip]
- )} - - - - {provider.inputs.map((inputParam, index) => ( - -
- - {inputParam.label} - {!inputParam.optional &&  *} - {inputParam.description && ( - - )} - -
- {providerExpanded[provider.name] && inputParam.type === 'credential' && ( - setValue(newValue, provider.name, 'credentialId')} - /> - )} - {inputParam.type === 'boolean' && ( - setValue(newValue, provider.name, inputParam.name)} - value={ - speechToText[provider.name] - ? speechToText[provider.name][inputParam.name] - : inputParam.default ?? false - } - /> - )} - {providerExpanded[provider.name] && - (inputParam.type === 'string' || - inputParam.type === 'password' || - inputParam.type === 'number') && ( - setValue(newValue, provider.name, inputParam.name)} - value={ - speechToText[provider.name] - ? speechToText[provider.name][inputParam.name] - : inputParam.default ?? '' - } - /> - )} + )} + {(inputParam.type === 'string' || inputParam.type === 'password' || inputParam.type === 'number') && ( + setValue(newValue, selectedProvider, inputParam.name)} + value={ + speechToText[selectedProvider] + ? speechToText[selectedProvider][inputParam.name] + : inputParam.default ?? '' + } + /> + )} - {providerExpanded[provider.name] && inputParam.type === 'options' && ( - setValue(newValue, provider.name, inputParam.name)} - value={ - speechToText[provider.name] - ? speechToText[provider.name][inputParam.name] - : inputParam.default ?? 'choose an option' - } - /> - )} -
- ))} -
- - ))} + {inputParam.type === 'options' && ( + setValue(newValue, selectedProvider, inputParam.name)} + value={ + speechToText[selectedProvider] + ? speechToText[selectedProvider][inputParam.name] + : inputParam.default ?? 'choose an option' + } + /> + )} + + ))} + diff --git a/packages/ui/src/ui-component/dropdown/AsyncDropdown.js b/packages/ui/src/ui-component/dropdown/AsyncDropdown.js index b24fa02b..b98410a8 100644 --- a/packages/ui/src/ui-component/dropdown/AsyncDropdown.js +++ b/packages/ui/src/ui-component/dropdown/AsyncDropdown.js @@ -105,7 +105,11 @@ export const AsyncDropdown = ({ })() // eslint-disable-next-line react-hooks/exhaustive-deps - }, []) + }, [credentialNames]) + + useEffect(() => { + setInternalValue(value) + }, [value]) return ( <> diff --git a/packages/ui/src/views/canvas/CredentialInputHandler.js b/packages/ui/src/views/canvas/CredentialInputHandler.js index 4f874719..8285a00d 100644 --- a/packages/ui/src/views/canvas/CredentialInputHandler.js +++ b/packages/ui/src/views/canvas/CredentialInputHandler.js @@ -1,5 +1,5 @@ import PropTypes from 'prop-types' -import { useRef, useState } from 'react' +import { useEffect, useRef, useState } from 'react' // material-ui import { IconButton } from '@mui/material' @@ -88,6 +88,10 @@ const CredentialInputHandler = ({ inputParam, data, onSelect, disabled = false } setShowSpecificCredentialDialog(true) } + useEffect(() => { + setCredentialId(data?.credential ?? '') + }, [data]) + return (
{inputParam && ( From 5aa991ae56d910120b3bbb27e1ead14a81e79559 Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 19 Feb 2024 12:15:43 +0530 Subject: [PATCH 50/62] Update how uploads are shown in view messages dialog --- .../ui-component/dialog/ViewMessagesDialog.js | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js b/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js index 2df501fb..a28f2c68 100644 --- a/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js +++ b/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js @@ -632,8 +632,8 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { sx={{ background: message.type === 'apiMessage' ? theme.palette.asyncSelect.main : '', - pl: 1, - pr: 1 + py: '1rem', + px: '1.5rem' }} key={index} style={{ display: 'flex', justifyContent: 'center', alignContent: 'center' }} @@ -683,34 +683,6 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { })}
)} -
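Patch 51 below also has the server echo the transcribed question back on the prediction result, so a chat view can replace an empty typed input with what the audio actually said. A sketch of the fields involved, with names taken from those diffs and the types themselves assumed:

    // Assumed typing for the prediction result fields patch 51 touches;
    // the field names come from the diffs, the types are guesses.
    interface PredictionResponse {
        text?: string         // the model's answer
        question: string      // echoed back, e.g. the transcription of an audio-only input
        chatId: string
        chatMessageId: string
        sessionId?: string
    }

    // How the chat view can recover the user message when the typed input was empty:
    function resolveUserMessage(input: string, data: PredictionResponse): string {
        return input === '' && data.question ? data.question : input
    }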
- {/* Messages are being rendered in Markdown format */} - - ) : ( - - {children} - - ) - } - }} - > - {message.message} - -
{message.fileUploads && message.fileUploads.length > 0 && (
{ })}
)} +
+ {/* Messages are being rendered in Markdown format */} + + ) : ( + + {children} + + ) + } + }} + > + {message.message} + +
{message.fileAnnotations && (
{message.fileAnnotations.map((fileAnnotation, index) => { From d313dc67546932e949baec2291e55f7839d50566 Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 19 Feb 2024 19:20:07 +0530 Subject: [PATCH 51/62] Show transcribed audio inputs as message along with audio clip in internal chat --- packages/server/src/index.ts | 5 ++++- .../src/ui-component/dialog/ViewMessagesDialog.js | 4 ++-- packages/ui/src/views/chatmessage/ChatMessage.js | 15 +++++++++++++-- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 22185170..38edfd8f 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1811,7 +1811,7 @@ export class App { } // Run Speech to Text conversion - if (upload.mime === 'audio/webm' && incomingInput.uploads?.length === 1) { + if (upload.mime === 'audio/webm') { let speechToTextConfig: ICommonObject = {} if (chatflow.speechToText) { const speechToTextProviders = JSON.parse(chatflow.speechToText) @@ -2111,6 +2111,9 @@ export class App { }) // Prepare response + // return the question in the response + // this is used when input text is empty but question is in audio format + result.question = incomingInput.question result.chatId = chatId result.chatMessageId = chatMessage.id if (sessionId) result.sessionId = sessionId diff --git a/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js b/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js index a28f2c68..7456aa81 100644 --- a/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js +++ b/packages/ui/src/ui-component/dialog/ViewMessagesDialog.js @@ -688,9 +688,9 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { style={{ display: 'flex', flexWrap: 'wrap', - flexDirection: 'row', + flexDirection: 'column', width: '100%', - gap: '4px' + gap: '8px' }} > {message.fileUploads.map((item, index) => { diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js index 73d1c4a6..75a466d3 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.js +++ b/packages/ui/src/views/chatmessage/ChatMessage.js @@ -391,6 +391,17 @@ export const ChatMessage = ({ open, chatflowid, isDialog, previews, setPreviews if (!chatId) setChatId(data.chatId) + if (input === '' && data.question) { + // the response contains the question even if it was in an audio format + // so if input is empty but the response contains the question, update the user message to show the question + setMessages((prevMessages) => { + let allMessages = [...cloneDeep(prevMessages)] + if (allMessages[allMessages.length - 2].type === 'apiMessage') return allMessages + allMessages[allMessages.length - 2].message = data.question + return allMessages + }) + } + if (!isChatFlowAvailableToStream) { let text = '' if (data.text) text = data.text @@ -669,9 +680,9 @@ export const ChatMessage = ({ open, chatflowid, isDialog, previews, setPreviews style={{ display: 'flex', flexWrap: 'wrap', - flexDirection: 'row', + flexDirection: 'column', width: '100%', - gap: '4px' + gap: '8px' }} > {message.fileUploads.map((item, index) => { From 8bad360796e0a2d3c6c9c9902846b3b33bd123cb Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 19 Feb 2024 19:28:09 +0530 Subject: [PATCH 52/62] Remove status indicator in speech to text configuration --- .../ui-component/dialog/SpeechToTextDialog.js | 26 ------------------- 1 file changed, 26 deletions(-) diff --git a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js 
b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js index 60634664..30fc9f60 100644 --- a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js +++ b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js @@ -247,32 +247,6 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { } /> - {speechToText[selectedProvider] && speechToText[selectedProvider].status && ( -
-                        [elided markup: 'ON' status chip]
- )} {speechToTextProviders[selectedProvider].inputs.map((inputParam, index) => ( From b31e8715f46c6c2f32ec0ecf655b99685b03915c Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Mon, 19 Feb 2024 15:27:19 -0800 Subject: [PATCH 53/62] reverting all image upload logic to individual chains/agents --- .../ConversationalAgent.ts | 43 +++++++- .../agents/MRKLAgentChat/MRKLAgentChat.ts | 32 +++++- .../ConversationChain/ConversationChain.ts | 62 ++++++++---- .../nodes/chains/LLMChain/LLMChain.ts | 50 ++++++++-- .../ChatOpenAI/FlowiseChatOpenAI.ts | 34 +------ packages/components/src/multiModalUtils.ts | 98 +++++-------------- 6 files changed, 177 insertions(+), 142 deletions(-) diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 2cbc1e67..052be861 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -11,7 +11,8 @@ import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { AgentExecutor } from '../../../src/agents' -import { injectAgentExecutorNodeData } from '../../../src/multiModalUtils' +import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' +import { addImagesToMessages } from '../../../src/multiModalUtils' const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. @@ -82,14 +83,19 @@ class ConversationalAgent_Agents implements INode { } async init(nodeData: INodeData, input: string, options: ICommonObject): Promise { - return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) + return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) } async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory - const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) - injectAgentExecutorNodeData(executor, nodeData, options) + const executor = await prepareAgent( + nodeData, + options, + { sessionId: this.sessionId, chatId: options.chatId, input }, + options.chatHistory + ) + // injectAgentExecutorNodeData(executor, nodeData, options) const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) @@ -123,6 +129,7 @@ class ConversationalAgent_Agents implements INode { const prepareAgent = async ( nodeData: INodeData, + options: ICommonObject, flowObj: { sessionId?: string; chatId?: string; input?: string }, chatHistory: IMessage[] = [] ) => { @@ -149,6 +156,32 @@ const prepareAgent = async ( outputParser }) + if (model instanceof ChatOpenAI) { + let humanImageMessages: HumanMessage[] = [] + const chatModel = model as ChatOpenAI + const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) + + if (messageContent?.length) { + // Change model to gpt-4-vision + chatModel.modelName = 'gpt-4-vision-preview' + + // Change default max token to higher when using gpt-4-vision + chatModel.maxTokens = 1024 + + for (const msg of messageContent) { + humanImageMessages.push(new 
HumanMessage({ content: [msg] })) + } + let messagePlaceholder = prompt.promptMessages.pop() + prompt.promptMessages.push(...humanImageMessages) + // @ts-ignore + prompt.promptMessages.push(messagePlaceholder) + } else { + // revert to previous values if image upload is empty + chatModel.modelName = chatModel.configuredModel + chatModel.maxTokens = chatModel.configuredMaxToken + } + } + const runnableAgent = RunnableSequence.from([ { [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input, @@ -169,7 +202,7 @@ const prepareAgent = async ( sessionId: flowObj?.sessionId, chatId: flowObj?.chatId, input: flowObj?.input, - verbose: process.env.DEBUG === 'true' ? true : false + verbose: process.env.DEBUG === 'true' }) return executor diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts index fd982b6c..98ed2e0c 100644 --- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts @@ -7,7 +7,11 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { additionalCallbacks } from '../../../src/handler' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' -import { injectLcAgentExecutorNodeData } from '../../../src/multiModalUtils' +import { ChatOpenAI } from "../../chatmodels/ChatOpenAI/FlowiseChatOpenAI"; +import { HumanMessage } from "@langchain/core/messages"; +import { addImagesToMessages } from "../../../src/multiModalUtils"; +import { ChatPromptTemplate, SystemMessagePromptTemplate } from "langchain/prompts"; +// import { injectLcAgentExecutorNodeData } from '../../../src/multiModalUtils' class MRKLAgentChat_Agents implements INode { label: string @@ -54,19 +58,39 @@ class MRKLAgentChat_Agents implements INode { tools = flatten(tools) const promptWithChat = await pull('hwchase17/react-chat') + let chatPromptTemplate = undefined + if (model instanceof ChatOpenAI) { + const chatModel = model as ChatOpenAI + const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) + + if (messageContent?.length) { + // Change model to gpt-4-vision + chatModel.modelName = 'gpt-4-vision-preview' + + // Change default max token to higher when using gpt-4-vision + chatModel.maxTokens = 1024 + const oldTemplate = promptWithChat.template as string + let chatPromptTemplate = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)]) + chatPromptTemplate.promptMessages = [new HumanMessage({ content: messageContent })] + } else { + // revert to previous values if image upload is empty + chatModel.modelName = chatModel.configuredModel + chatModel.maxTokens = chatModel.configuredMaxToken + } + } const agent = await createReactAgent({ llm: model, tools, - prompt: promptWithChat + prompt: chatPromptTemplate ?? promptWithChat }) const executor = new AgentExecutor({ agent, tools, - verbose: process.env.DEBUG === 'true' ? 
true : false + verbose: process.env.DEBUG === 'true' }) - injectLcAgentExecutorNodeData(executor, nodeData, options) + // injectLcAgentExecutorNodeData(executor, nodeData, options) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index fd819a11..6ebeaba9 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -1,15 +1,16 @@ -import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts' -import { BaseChatModel } from '@langchain/core/language_models/chat_models' -import { RunnableSequence } from '@langchain/core/runnables' -import { StringOutputParser } from '@langchain/core/output_parsers' -import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' import { ConversationChain } from 'langchain/chains' +import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' +import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts' import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' -import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' +import { RunnableSequence } from 'langchain/schema/runnable' +import { StringOutputParser } from 'langchain/schema/output_parser' +import { HumanMessage } from 'langchain/schema' +import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' -import { injectRunnableNodeData } from '../../../src/multiModalUtils' +import { addImagesToMessages } from '../../../src/multiModalUtils' +import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' @@ -95,8 +96,6 @@ class ConversationChain_Chains implements INode { const memory = nodeData.inputs?.memory const chain = prepareChain(nodeData, options, this.sessionId) - injectRunnableNodeData(chain, nodeData, options) - const moderations = nodeData.inputs?.inputModeration as Moderation[] if (moderations && moderations.length > 0) { @@ -146,7 +145,7 @@ class ConversationChain_Chains implements INode { } } -const prepareChatPrompt = (nodeData: INodeData) => { +const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => { const memory = nodeData.inputs?.memory as FlowiseMemory const prompt = nodeData.inputs?.systemMessagePrompt as string const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate @@ -154,12 +153,10 @@ const prepareChatPrompt = (nodeData: INodeData) => { if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) { const sysPrompt = chatPromptTemplate.promptMessages[0] const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1] - const chatPrompt = ChatPromptTemplate.fromMessages([ - sysPrompt, - new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), - humanPrompt - ]) + const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt] + if (humanImageMessages.length) messages.push(...humanImageMessages) + const chatPrompt = ChatPromptTemplate.fromMessages(messages) if ((chatPromptTemplate as any).promptValues) { // @ts-ignore chatPrompt.promptValues = (chatPromptTemplate as any).promptValues @@ -168,22 +165,47 @@ const prepareChatPrompt = (nodeData: INodeData) => { return chatPrompt } - const chatPrompt = ChatPromptTemplate.fromMessages([ + const messages = [ SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage), new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`) - ]) + ] + if (humanImageMessages.length) messages.push(...(humanImageMessages as any[])) + + const chatPrompt = ChatPromptTemplate.fromMessages(messages) return chatPrompt } const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => { const chatHistory = options.chatHistory - const model = nodeData.inputs?.model as BaseChatModel + let model = nodeData.inputs?.model const memory = nodeData.inputs?.memory as FlowiseMemory const memoryKey = memory.memoryKey ?? 
'chat_history' - const chatPrompt = prepareChatPrompt(nodeData) + let humanImageMessages: HumanMessage[] = [] + if (model instanceof ChatOpenAI) { + const chatModel = model as ChatOpenAI + const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) + + if (messageContent?.length) { + // Change model to gpt-4-vision + chatModel.modelName = 'gpt-4-vision-preview' + + // Change default max token to higher when using gpt-4-vision + chatModel.maxTokens = 1024 + + for (const msg of messageContent) { + humanImageMessages.push(new HumanMessage({ content: [msg] })) + } + } else { + // revert to previous values if image upload is empty + chatModel.modelName = chatModel.configuredModel + chatModel.maxTokens = chatModel.configuredMaxToken + } + } + + const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages) let promptVariables = {} const promptValuesRaw = (chatPrompt as any).promptValues if (promptValuesRaw) { @@ -207,7 +229,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s }, ...promptVariables }, - prepareChatPrompt(nodeData), + prepareChatPrompt(nodeData, humanImageMessages), model, new StringOutputParser() ]) diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts index 9351dc11..361fabd9 100644 --- a/packages/components/nodes/chains/LLMChain/LLMChain.ts +++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts @@ -6,8 +6,11 @@ import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' -import { injectLLMChainNodeData } from '../../../src/multiModalUtils' import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers' +import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' +import { addImagesToMessages } from '../../../src/multiModalUtils' +import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts' +import { HumanMessage } from 'langchain/schema' class LLMChain_Chains implements INode { label: string @@ -107,7 +110,6 @@ class LLMChain_Chains implements INode { verbose: process.env.DEBUG === 'true' }) const inputVariables = chain.prompt.inputVariables as string[] // ["product"] - injectLLMChainNodeData(nodeData, options) promptValues = injectOutputParser(this.outputParser, chain, promptValues) const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData) // eslint-disable-next-line no-console @@ -137,7 +139,6 @@ class LLMChain_Chains implements INode { if (!this.outputParser && outputParser) { this.outputParser = outputParser } - injectLLMChainNodeData(nodeData, options) promptValues = injectOutputParser(this.outputParser, chain, promptValues) const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData) // eslint-disable-next-line no-console @@ -163,12 +164,7 @@ const runPrediction = async ( const socketIO = isStreaming ? options.socketIO : undefined const socketIOClientId = isStreaming ? 
options.socketIOClientId : '' const moderations = nodeData.inputs?.inputModeration as Moderation[] - /** - * Apply string transformation to reverse converted special chars: - * FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" } - * TO: { "value": "hello i am ben\n\n\thow are you?" } - */ - const promptValues = handleEscapeCharacters(promptValuesRaw, true) + let model = nodeData.inputs?.model as ChatOpenAI if (moderations && moderations.length > 0) { try { @@ -181,6 +177,42 @@ const runPrediction = async ( } } + /** + * Apply string transformation to reverse converted special chars: + * FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" } + * TO: { "value": "hello i am ben\n\n\thow are you?" } + */ + const promptValues = handleEscapeCharacters(promptValuesRaw, true) + const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) + if (chain.llm instanceof ChatOpenAI) { + const chatOpenAI = chain.llm as ChatOpenAI + if (messageContent?.length) { + // Change model to gpt-4-vision && max token to higher when using gpt-4-vision + chatOpenAI.modelName = 'gpt-4-vision-preview' + chatOpenAI.maxTokens = 1024 + // Add image to the message + if (chain.prompt instanceof PromptTemplate) { + const oldTemplate = chain.prompt.template as string + let cp2 = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)]) + cp2.promptMessages = [new HumanMessage({ content: messageContent })] + chain.prompt = cp2 + } else if (chain.prompt instanceof ChatPromptTemplate) { + chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent })) + } else if (chain.prompt instanceof FewShotPromptTemplate) { + let currentPrompt = chain.prompt as FewShotPromptTemplate + const oldTemplate = currentPrompt.examplePrompt.template as string + let cp2 = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)]) + cp2.promptMessages = [new HumanMessage({ content: messageContent })] + // @ts-ignore + currentPrompt.examplePrompt = cp2 + } + } else { + // revert to previous values if image upload is empty + chatOpenAI.modelName = model.configuredModel + chatOpenAI.maxTokens = model.configuredMaxToken + } + } + if (promptValues && inputVariables.length > 0) { let seen: string[] = [] diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 0986f26f..2fa65375 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -7,12 +7,9 @@ import { ChatOpenAICallOptions } from '@langchain/openai' import { BaseChatModelParams } from '@langchain/core/language_models/chat_models' -import { BaseLanguageModelInput } from '@langchain/core/language_models/base' -import { BaseMessageChunk, BaseMessageLike, HumanMessage } from '@langchain/core/messages' -import { LLMResult } from '@langchain/core/outputs' -import { Callbacks } from '@langchain/core/callbacks/manager' import { IMultiModalOption } from '../../../src' -import { addImagesToMessages, MultiModalOptions } from '../../../src/multiModalUtils' +import { BaseMessageLike, LLMResult } from 'langchain/schema' +import { Callbacks } from '@langchain/core/callbacks/manager' export class ChatOpenAI extends LangchainChatOpenAI { configuredModel: string @@ -35,34 +32,7 @@ export class ChatOpenAI extends LangchainChatOpenAI { 
this.configuredMaxToken = fields?.maxTokens } - async invoke(input: BaseLanguageModelInput, options?: ChatOpenAICallOptions): Promise { - return super.invoke(input, options) - } - async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise { - if (this.lc_kwargs.chainData) { - await this.injectMultiModalMessages(messages, this.lc_kwargs.chainData) - } return super.generate(messages, options, callbacks) } - - private async injectMultiModalMessages(messages: BaseMessageLike[][], options: MultiModalOptions) { - const optionsData = options.nodeOptions - const messageContent = addImagesToMessages(optionsData, this.multiModalOption) - if (messageContent?.length) { - if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) { - // Change model to gpt-4-vision - this.modelName = 'gpt-4-vision-preview' - - // Change default max token to higher when using gpt-4-vision - this.maxTokens = 1024 - - messages[0].push(new HumanMessage({ content: messageContent })) - } - } else { - // revert to previous values if image upload is empty - this.modelName = this.configuredModel - this.maxTokens = this.configuredMaxToken - } - } } diff --git a/packages/components/src/multiModalUtils.ts b/packages/components/src/multiModalUtils.ts index a7262324..94414e58 100644 --- a/packages/components/src/multiModalUtils.ts +++ b/packages/components/src/multiModalUtils.ts @@ -1,84 +1,38 @@ import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface' +import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai' import path from 'path' import { getStoragePath } from './utils' import fs from 'fs' -import { ChatOpenAI } from '../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI' -import { LLMChain } from 'langchain/chains' -import { RunnableBinding, RunnableSequence } from 'langchain/schema/runnable' -import { AgentExecutor as LcAgentExecutor, ChatAgent, RunnableAgent } from 'langchain/agents' -import { AgentExecutor } from './agents' -export interface MultiModalOptions { - nodeOptions: ICommonObject -} - -export const injectLLMChainNodeData = (nodeData: INodeData, options: ICommonObject) => { - let llmChain = nodeData.instance as LLMChain - ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) } -} - -export const injectAgentExecutorNodeData = (agentExecutor: AgentExecutor, nodeData: INodeData, options: ICommonObject) => { - if (agentExecutor.agent instanceof RunnableAgent && agentExecutor.agent.runnable instanceof RunnableSequence) { - let rs = agentExecutor.agent.runnable as RunnableSequence - injectRunnableNodeData(rs, nodeData, options) - } -} - -export const injectLcAgentExecutorNodeData = (agentExecutor: LcAgentExecutor, nodeData: INodeData, options: ICommonObject) => { - if (agentExecutor.agent instanceof ChatAgent) { - let llmChain = agentExecutor.agent.llmChain as LLMChain - ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) } - } -} - -export const injectRunnableNodeData = (runnableSequence: RunnableSequence, nodeData: INodeData, options: ICommonObject) => { - runnableSequence.steps.forEach((step) => { - if (step instanceof ChatOpenAI) { - ;(step as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) } - } - - if (step instanceof RunnableBinding) { - if ((step as RunnableBinding).bound instanceof ChatOpenAI) { - ;((step as 
RunnableBinding).bound as ChatOpenAI).lc_kwargs.chainData = { - nodeOptions: getUploadsFromOptions(options) - } - } - } - }) -} - -const getUploadsFromOptions = (options: ICommonObject): ICommonObject => { - if (options?.uploads) { - return { - uploads: options.uploads, - chatflowid: options.chatflowid, - chatId: options.chatId - } - } - return {} -} - -export const addImagesToMessages = (options: ICommonObject, multiModalOption?: IMultiModalOption): MessageContentImageUrl[] => { +export const addImagesToMessages = ( + nodeData: INodeData, + options: ICommonObject, + multiModalOption?: IMultiModalOption +): MessageContentImageUrl[] => { const imageContent: MessageContentImageUrl[] = [] + let model = nodeData.inputs?.model - // Image Uploaded - if (multiModalOption?.image && multiModalOption?.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) { - const imageUploads = getImageUploads(options.uploads) - for (const upload of imageUploads) { - if (upload.type == 'stored-file') { - const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name) + if (model instanceof LangchainChatOpenAI && multiModalOption) { + // Image Uploaded + if (multiModalOption.image && multiModalOption.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) { + const imageUploads = getImageUploads(options.uploads) + for (const upload of imageUploads) { + let bf = upload.data + if (upload.type == 'stored-file') { + const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name) - // as the image is stored in the server, read the file and convert it to base64 - const contents = fs.readFileSync(filePath) - let bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64') + // as the image is stored in the server, read the file and convert it to base64 + const contents = fs.readFileSync(filePath) + bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64') - imageContent.push({ - type: 'image_url', - image_url: { - url: bf, - detail: multiModalOption?.image.imageResolution ?? 'low' - } - }) + imageContent.push({ + type: 'image_url', + image_url: { + url: bf, + detail: multiModalOption.image.imageResolution ?? 
'low' + } + }) + } } } } From 97a376d6e244721ffe37cfbb1f1fc4ff02585b37 Mon Sep 17 00:00:00 2001 From: Ilango Date: Tue, 20 Feb 2024 23:29:14 +0530 Subject: [PATCH 54/62] Fix local state sync issue, STT auth issue, and add none option for speech to text --- packages/server/src/index.ts | 10 +- .../ui-component/dialog/SpeechToTextDialog.js | 192 ++++++++++-------- packages/ui/src/ui-component/input/Input.js | 6 + 3 files changed, 116 insertions(+), 92 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 38edfd8f..3fb96dad 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1530,10 +1530,12 @@ export class App { if (chatflow.speechToText) { const speechToTextProviders = JSON.parse(chatflow.speechToText) for (const provider in speechToTextProviders) { - const providerObj = speechToTextProviders[provider] - if (providerObj.status) { - isSpeechToTextEnabled = true - break + if (provider !== 'none') { + const providerObj = speechToTextProviders[provider] + if (providerObj.status) { + isSpeechToTextEnabled = true + break + } } } } diff --git a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js index 30fc9f60..495e0258 100644 --- a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js +++ b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js @@ -175,7 +175,16 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { useEffect(() => { if (dialogProps.chatflow && dialogProps.chatflow.speechToText) { try { - setSpeechToText(JSON.parse(dialogProps.chatflow.speechToText)) + const speechToText = JSON.parse(dialogProps.chatflow.speechToText) + let selectedProvider = 'none' + Object.keys(speechToTextProviders).forEach((key) => { + const providerConfig = speechToText[key] + if (providerConfig.status) { + selectedProvider = key + } + }) + setSelectedProvider(selectedProvider) + setSpeechToText(speechToText) } catch (e) { setSpeechToText({}) console.error(e) @@ -193,7 +202,7 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { return () => dispatch({ type: HIDE_CANVAS_DIALOG }) }, [show, dispatch]) - const component = show ? ( + const component = ( { Speech To Text Providers - <> - - -
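The server hunk above now skips the placeholder 'none' key when deciding whether speech-to-text is enabled. A compact sketch of that check, assuming the persisted chatflow.speechToText JSON keeps the shape used throughout these patches:

    // Sketch of the enabled-check from the server hunk above; assumes
    // chatflow.speechToText holds JSON shaped { [providerName]: { status?: boolean } }
    // where 'none' is a placeholder entry that must never count as enabled.
    function isSpeechToTextEnabled(speechToTextJson?: string): boolean {
        if (!speechToTextJson) return false
        const providers: Record<string, { status?: boolean }> = JSON.parse(speechToTextJson)
        return Object.entries(providers).some(([name, cfg]) => name !== 'none' && cfg?.status === true)
    }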
- + + +
-
-
- - {speechToTextProviders[selectedProvider].url} - - } - /> -
- {speechToTextProviders[selectedProvider].inputs.map((inputParam, index) => ( - -
- - {inputParam.label} - {!inputParam.optional &&  *} - {inputParam.description && ( - - )} - -
- {inputParam.type === 'credential' && ( - setValue(newValue, selectedProvider, 'credentialId')} - /> - )} - {inputParam.type === 'boolean' && ( - setValue(newValue, selectedProvider, inputParam.name)} - value={ - speechToText[selectedProvider] - ? speechToText[selectedProvider][inputParam.name] - : inputParam.default ?? false - } - /> - )} - {(inputParam.type === 'string' || inputParam.type === 'password' || inputParam.type === 'number') && ( - setValue(newValue, selectedProvider, inputParam.name)} - value={ - speechToText[selectedProvider] - ? speechToText[selectedProvider][inputParam.name] - : inputParam.default ?? '' - } - /> - )} + > + AI +
+
+ + {speechToTextProviders[selectedProvider].url} + + } + /> +
+ {speechToTextProviders[selectedProvider].inputs.map((inputParam, index) => ( + +
+ + {inputParam.label} + {!inputParam.optional &&  *} + {inputParam.description && ( + + )} + +
+ {inputParam.type === 'credential' && ( + setValue(newValue, selectedProvider, 'credentialId')} + /> + )} + {inputParam.type === 'boolean' && ( + setValue(newValue, selectedProvider, inputParam.name)} + value={ + speechToText[selectedProvider] + ? speechToText[selectedProvider][inputParam.name] + : inputParam.default ?? false + } + /> + )} + {(inputParam.type === 'string' || inputParam.type === 'password' || inputParam.type === 'number') && ( + setValue(newValue, selectedProvider, inputParam.name)} + value={ + speechToText[selectedProvider] + ? speechToText[selectedProvider][inputParam.name] + : inputParam.default ?? '' + } + /> + )} - {inputParam.type === 'options' && ( - setValue(newValue, selectedProvider, inputParam.name)} - value={ - speechToText[selectedProvider] - ? speechToText[selectedProvider][inputParam.name] - : inputParam.default ?? 'choose an option' - } - /> - )} -
- ))} - + {inputParam.type === 'options' && ( + setValue(newValue, selectedProvider, inputParam.name)} + value={ + speechToText[selectedProvider] + ? speechToText[selectedProvider][inputParam.name] + : inputParam.default ?? 'choose an option' + } + /> + )} + + ))} + + )} @@ -310,7 +326,7 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { - ) : null + ) return createPortal(component, portalElement) } diff --git a/packages/ui/src/ui-component/input/Input.js b/packages/ui/src/ui-component/input/Input.js index e59f012c..4bee74a6 100644 --- a/packages/ui/src/ui-component/input/Input.js +++ b/packages/ui/src/ui-component/input/Input.js @@ -48,6 +48,12 @@ export const Input = ({ inputParam, value, nodes, edges, nodeId, onChange, disab } }, [myValue]) + useEffect(() => { + if (value) { + setMyValue(value) + } + }, [value]) + return ( <> {inputParam.name === 'note' ? ( From 4cee518cbf93688fed26eda10c1bb4edcfffe874 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Tue, 20 Feb 2024 13:48:02 -0800 Subject: [PATCH 55/62] image uploads for mrkl agent --- .../components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts index 0d5b4c48..1ed831aa 100644 --- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts @@ -79,8 +79,8 @@ class MRKLAgentChat_Agents implements INode { // Change default max token to higher when using gpt-4-vision chatModel.maxTokens = 1024 const oldTemplate = promptWithChat.template as string - let chatPromptTemplate = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)]) - chatPromptTemplate.promptMessages = [new HumanMessage({ content: messageContent })] + chatPromptTemplate = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)]) + chatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent })) } else { // revert to previous values if image upload is empty chatModel.modelName = chatModel.configuredModel From a48edcd3a8326b8c946b979eb1df675f136f982e Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 21 Feb 2024 18:39:24 +0800 Subject: [PATCH 56/62] touchup fixes --- .../ConversationalAgent.ts | 30 +++++++++++-------- .../agents/MRKLAgentChat/MRKLAgentChat.ts | 22 +++++++------- .../ConversationChain/ConversationChain.ts | 25 ++++++++-------- .../nodes/chains/LLMChain/LLMChain.ts | 24 ++++++++------- 4 files changed, 54 insertions(+), 47 deletions(-) diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 052be861..b2019a31 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -4,7 +4,7 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages' import { ChainValues } from '@langchain/core/utils/types' import { AgentStep } from '@langchain/core/agents' -import { renderTemplate } from '@langchain/core/prompts' +import { renderTemplate, MessagesPlaceholder } from '@langchain/core/prompts' import { RunnableSequence } from '@langchain/core/runnables' import { ChatConversationalAgent } 
from 'langchain/agents' import { getBaseClasses } from '../../../src/utils' @@ -141,11 +141,6 @@ const prepareAgent = async ( const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history' const inputKey = memory.inputKey ? memory.inputKey : 'input' - /** Bind a stop token to the model */ - const modelWithStop = model.bind({ - stop: ['\nObservation'] - }) - const outputParser = ChatConversationalAgent.getDefaultOutputParser({ llm: model, toolNames: tools.map((tool) => tool.name) @@ -158,30 +153,39 @@ const prepareAgent = async ( if (model instanceof ChatOpenAI) { let humanImageMessages: HumanMessage[] = [] - const chatModel = model as ChatOpenAI const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) if (messageContent?.length) { // Change model to gpt-4-vision - chatModel.modelName = 'gpt-4-vision-preview' + model.modelName = 'gpt-4-vision-preview' // Change default max token to higher when using gpt-4-vision - chatModel.maxTokens = 1024 + model.maxTokens = 1024 for (const msg of messageContent) { humanImageMessages.push(new HumanMessage({ content: [msg] })) } - let messagePlaceholder = prompt.promptMessages.pop() + + // Pop the `agent_scratchpad` MessagePlaceHolder + let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder + + // Add the HumanMessage for images prompt.promptMessages.push(...humanImageMessages) - // @ts-ignore + + // Add the `agent_scratchpad` MessagePlaceHolder back prompt.promptMessages.push(messagePlaceholder) } else { // revert to previous values if image upload is empty - chatModel.modelName = chatModel.configuredModel - chatModel.maxTokens = chatModel.configuredMaxToken + model.modelName = model.configuredModel + model.maxTokens = model.configuredMaxToken } } + /** Bind a stop token to the model */ + const modelWithStop = model.bind({ + stop: ['\nObservation'] + }) + const runnableAgent = RunnableSequence.from([ { [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input, diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts index 1ed831aa..0d3b612c 100644 --- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts @@ -11,8 +11,7 @@ import { createReactAgent } from '../../../src/agents' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' import { HumanMessage } from '@langchain/core/messages' import { addImagesToMessages } from '../../../src/multiModalUtils' -import { ChatPromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts' -// import { injectLcAgentExecutorNodeData } from '../../../src/multiModalUtils' +import { ChatPromptTemplate, HumanMessagePromptTemplate } from 'langchain/prompts' class MRKLAgentChat_Agents implements INode { label: string @@ -66,32 +65,33 @@ class MRKLAgentChat_Agents implements INode { let tools = nodeData.inputs?.tools as Tool[] tools = flatten(tools) - const promptWithChat = await pull('hwchase17/react-chat') + const prompt = await pull('hwchase17/react-chat') let chatPromptTemplate = undefined + if (model instanceof ChatOpenAI) { - const chatModel = model as ChatOpenAI const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) if (messageContent?.length) { // Change model to gpt-4-vision - chatModel.modelName = 'gpt-4-vision-preview' + model.modelName = 'gpt-4-vision-preview' // Change default max token to higher when using gpt-4-vision - 
chatModel.maxTokens = 1024 - const oldTemplate = promptWithChat.template as string - chatPromptTemplate = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)]) + model.maxTokens = 1024 + + const oldTemplate = prompt.template as string + chatPromptTemplate = ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(oldTemplate)]) chatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent })) } else { // revert to previous values if image upload is empty - chatModel.modelName = chatModel.configuredModel - chatModel.maxTokens = chatModel.configuredMaxToken + model.modelName = model.configuredModel + model.maxTokens = model.configuredMaxToken } } const agent = await createReactAgent({ llm: model, tools, - prompt: chatPromptTemplate ?? promptWithChat + prompt: chatPromptTemplate ?? prompt }) const executor = new AgentExecutor({ diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index 6ebeaba9..25d80bee 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -1,16 +1,16 @@ import { ConversationChain } from 'langchain/chains' -import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' -import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts' -import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' -import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' -import { RunnableSequence } from 'langchain/schema/runnable' -import { StringOutputParser } from 'langchain/schema/output_parser' -import { HumanMessage } from 'langchain/schema' +import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts' +import { RunnableSequence } from '@langchain/core/runnables' +import { StringOutputParser } from '@langchain/core/output_parsers' +import { HumanMessage } from '@langchain/core/messages' import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' import { addImagesToMessages } from '../../../src/multiModalUtils' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' +import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' @@ -179,29 +179,28 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => { const chatHistory = options.chatHistory - let model = nodeData.inputs?.model + let model = nodeData.inputs?.model as ChatOpenAI const memory = nodeData.inputs?.memory as FlowiseMemory const memoryKey = memory.memoryKey ?? 'chat_history' let humanImageMessages: HumanMessage[] = [] if (model instanceof ChatOpenAI) { - const chatModel = model as ChatOpenAI const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) if (messageContent?.length) { // Change model to gpt-4-vision - chatModel.modelName = 'gpt-4-vision-preview' + model.modelName = 'gpt-4-vision-preview' // Change default max token to higher when using gpt-4-vision - chatModel.maxTokens = 1024 + model.maxTokens = 1024 for (const msg of messageContent) { humanImageMessages.push(new HumanMessage({ content: [msg] })) } } else { // revert to previous values if image upload is empty - chatModel.modelName = chatModel.configuredModel - chatModel.maxTokens = chatModel.configuredMaxToken + model.modelName = model.configuredModel + model.maxTokens = model.configuredMaxToken } } diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts index 361fabd9..c60b4b29 100644 --- a/packages/components/nodes/chains/LLMChain/LLMChain.ts +++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts @@ -1,5 +1,6 @@ import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base' import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers' +import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts' import { OutputFixingParser } from 'langchain/output_parsers' import { LLMChain } from 'langchain/chains' import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' @@ -9,7 +10,6 @@ import { checkInputs, Moderation, streamResponse } from '../../moderation/Modera import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' import { addImagesToMessages } from '../../../src/multiModalUtils' -import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts' import { HumanMessage } from 'langchain/schema' class LLMChain_Chains implements INode { @@ -184,6 +184,7 @@ const runPrediction = async ( */ const promptValues = handleEscapeCharacters(promptValuesRaw, true) const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) + if (chain.llm instanceof ChatOpenAI) { const chatOpenAI = chain.llm as ChatOpenAI if (messageContent?.length) { @@ -192,19 +193,22 @@ const runPrediction = async ( chatOpenAI.maxTokens = 1024 // Add image to the message if (chain.prompt instanceof PromptTemplate) { - const oldTemplate = chain.prompt.template as string - let cp2 = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)]) - cp2.promptMessages = [new HumanMessage({ content: messageContent })] - chain.prompt = cp2 + const existingPromptTemplate = chain.prompt.template as string + let 
newChatPromptTemplate = ChatPromptTemplate.fromMessages([ + HumanMessagePromptTemplate.fromTemplate(existingPromptTemplate) + ]) + newChatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent })) + chain.prompt = newChatPromptTemplate } else if (chain.prompt instanceof ChatPromptTemplate) { chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent })) } else if (chain.prompt instanceof FewShotPromptTemplate) { - let currentPrompt = chain.prompt as FewShotPromptTemplate - const oldTemplate = currentPrompt.examplePrompt.template as string - let cp2 = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)]) - cp2.promptMessages = [new HumanMessage({ content: messageContent })] + let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string + let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([ + HumanMessagePromptTemplate.fromTemplate(existingFewShotPromptTemplate) + ]) + newFewShotPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent })) // @ts-ignore - currentPrompt.examplePrompt = cp2 + chain.prompt.examplePrompt = newFewShotPromptTemplate } } else { // revert to previous values if image upload is empty From 4071fe58bebf4ae22ed1bab1c8f570739a8351a2 Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 21 Feb 2024 19:05:23 +0800 Subject: [PATCH 57/62] add default none option --- packages/ui/src/ui-component/dialog/SpeechToTextDialog.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js index 495e0258..7f30d14b 100644 --- a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js +++ b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js @@ -105,7 +105,7 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => { const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) const [speechToText, setSpeechToText] = useState({}) - const [selectedProvider, setSelectedProvider] = useState('openAIWhisper') + const [selectedProvider, setSelectedProvider] = useState('none') const onSave = async () => { try { From e86550a91abc3670de3116cb215a9fbaf82bcb6c Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 22 Feb 2024 13:44:04 +0800 Subject: [PATCH 58/62] update marketplace templates --- .../nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts | 5 +- .../chatflows/API Agent OpenAI.json | 78 +++++++++++- .../marketplaces/chatflows/API Agent.json | 117 +++++++++++++++++- .../Advanced Structured Output Parser.json | 39 +++++- .../marketplaces/chatflows/Antonym.json | 39 +++++- .../marketplaces/chatflows/AutoGPT.json | 39 +++++- .../marketplaces/chatflows/BabyAGI.json | 39 +++++- .../marketplaces/chatflows/CSV Agent.json | 39 +++++- .../chatflows/Chat with a Podcast.json | 39 +++++- .../marketplaces/chatflows/ChatGPTPlugin.json | 39 +++++- .../chatflows/Conversational Agent.json | 39 +++++- .../Conversational Retrieval Agent.json | 39 +++++- .../Conversational Retrieval QA Chain.json | 39 +++++- .../chatflows/Flowise Docs QnA.json | 39 +++++- .../server/marketplaces/chatflows/IfElse.json | 39 +++++- .../chatflows/Image Generation.json | 39 +++++- .../chatflows/Input Moderation.json | 39 +++++- .../chatflows/List Output Parser.json | 39 +++++- .../chatflows/Long Term Memory.json | 39 +++++- .../chatflows/Metadata Filter.json | 39 +++++- .../chatflows/Multi Prompt Chain.json | 39 +++++- .../chatflows/Multi Retrieval QA Chain.json | 39 +++++- 
From e86550a91abc3670de3116cb215a9fbaf82bcb6c Mon Sep 17 00:00:00 2001
From: Henry
Date: Thu, 22 Feb 2024 13:44:04 +0800
Subject: [PATCH 58/62] update marketplace templates

---
 .../nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts |   5 +-
 .../chatflows/API Agent OpenAI.json           |  78 +++++++++++-
 .../marketplaces/chatflows/API Agent.json     | 117 +++++++++++++++++-
 .../Advanced Structured Output Parser.json    |  39 +++++-
 .../marketplaces/chatflows/Antonym.json       |  39 +++++-
 .../marketplaces/chatflows/AutoGPT.json       |  39 +++++-
 .../marketplaces/chatflows/BabyAGI.json       |  39 +++++-
 .../marketplaces/chatflows/CSV Agent.json     |  39 +++++-
 .../chatflows/Chat with a Podcast.json        |  39 +++++-
 .../marketplaces/chatflows/ChatGPTPlugin.json |  39 +++++-
 .../chatflows/Conversational Agent.json       |  39 +++++-
 .../Conversational Retrieval Agent.json       |  39 +++++-
 .../Conversational Retrieval QA Chain.json    |  39 +++++-
 .../chatflows/Flowise Docs QnA.json           |  39 +++++-
 .../server/marketplaces/chatflows/IfElse.json |  39 +++++-
 .../chatflows/Image Generation.json           |  39 +++++-
 .../chatflows/Input Moderation.json           |  39 +++++-
 .../chatflows/List Output Parser.json         |  39 +++++-
 .../chatflows/Long Term Memory.json           |  39 +++++-
 .../chatflows/Metadata Filter.json            |  39 +++++-
 .../chatflows/Multi Prompt Chain.json         |  39 +++++-
 .../chatflows/Multi Retrieval QA Chain.json   |  39 +++++-
 .../chatflows/Multiple VectorDB.json          | 117 +++++++++++++++++-
 .../marketplaces/chatflows/OpenAI Agent.json  |  39 +++++-
 .../Prompt Chaining with VectorStore.json     |  78 +++++++++++-
 .../marketplaces/chatflows/ReAct Agent.json   |  39 +++++-
 .../marketplaces/chatflows/SQL DB Chain.json  |  39 +++++-
 .../marketplaces/chatflows/SQL Prompt.json    | 117 +++++++++++++++++-
 .../chatflows/Simple Conversation Chain.json  |  39 +++++-
 .../chatflows/Structured Output Parser.json   |  39 +++++-
 .../marketplaces/chatflows/Translator.json    |  39 +++++-
 .../marketplaces/chatflows/WebBrowser.json    |  78 +++++++++++-
 .../marketplaces/chatflows/WebPage QnA.json   |  39 +++++-
 33 files changed, 1520 insertions(+), 84 deletions(-)

diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
index 02117c8c..cc0b0efa 100644
--- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
+++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
@@ -21,7 +21,7 @@ class ChatOpenAI_ChatModels implements INode {
     constructor() {
         this.label = 'ChatOpenAI'
         this.name = 'chatOpenAI'
-        this.version = 4.0
+        this.version = 5.0
         this.type = 'ChatOpenAI'
         this.icon = 'openai.svg'
         this.category = 'Chat Models'
@@ -175,7 +175,8 @@ class ChatOpenAI_ChatModels implements INode {
                 label: 'Allow Image Uploads',
                 name: 'allowImageUploads',
                 type: 'boolean',
-                description: 'Automatically uses gpt-4-vision-preview when image is being uploaded from chat',
+                description:
+                    'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
                 default: false,
                 optional: true
             },
diff --git a/packages/server/marketplaces/chatflows/API Agent OpenAI.json b/packages/server/marketplaces/chatflows/API Agent OpenAI.json
index 4e8727da..691852d6 100644
--- a/packages/server/marketplaces/chatflows/API Agent OpenAI.json
+++ b/packages/server/marketplaces/chatflows/API Agent OpenAI.json
@@ -90,7 +90,7 @@
         "data": {
             "id": "chatOpenAI_1",
             "label": "ChatOpenAI",
-            "version": 4,
+            "version": 5,
             "name": "chatOpenAI",
             "type": "ChatOpenAI",
             "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@@ -237,6 +237,39 @@
                     "optional": true,
                     "additionalParams": true,
                     "id": "chatOpenAI_1-input-baseOptions-json"
+                },
+                {
+                    "label": "Allow Image Uploads",
+                    "name": "allowImageUploads",
+                    "type": "boolean",
+                    "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat.
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" } ], "inputAnchors": [ @@ -257,7 +290,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -437,7 +472,7 @@ "data": { "id": "chatOpenAI_2", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -584,6 +619,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_2-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_2-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_2-input-imageResolution-options" } ], "inputAnchors": [ @@ -604,7 +672,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json index a0fed334..facdcb6b 100644 --- a/packages/server/marketplaces/chatflows/API Agent.json +++ b/packages/server/marketplaces/chatflows/API Agent.json @@ -398,7 +398,7 @@ "data": { "id": "chatOpenAI_2", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -545,6 +545,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_2-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_2-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_2-input-imageResolution-options" } ], "inputAnchors": [ @@ -565,7 +598,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -597,7 +632,7 @@ "data": { "id": "chatOpenAI_1", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -744,6 +779,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_1-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" } ], "inputAnchors": [ @@ -764,7 +832,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -796,7 +866,7 @@ "data": { "id": "chatOpenAI_3", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -943,6 +1013,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_3-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_3-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_3-input-imageResolution-options" } ], "inputAnchors": [ @@ -963,7 +1066,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json index c6480dd1..8618bf86 100644 --- a/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json @@ -181,7 +181,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -334,6 +334,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -355,7 +388,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Antonym.json b/packages/server/marketplaces/chatflows/Antonym.json index 5cf6d85c..101d0430 100644 --- a/packages/server/marketplaces/chatflows/Antonym.json +++ b/packages/server/marketplaces/chatflows/Antonym.json @@ -177,7 +177,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -324,6 +324,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -344,7 +377,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json index c3ed0721..bb7c7bdc 100644 --- a/packages/server/marketplaces/chatflows/AutoGPT.json +++ b/packages/server/marketplaces/chatflows/AutoGPT.json @@ -253,7 +253,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -400,6 +400,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -420,7 +453,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json index cf8df770..8a800046 100644 --- a/packages/server/marketplaces/chatflows/BabyAGI.json +++ b/packages/server/marketplaces/chatflows/BabyAGI.json @@ -346,7 +346,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -499,6 +499,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -520,7 +553,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/CSV Agent.json b/packages/server/marketplaces/chatflows/CSV Agent.json index af2cfdd7..0a0bdce9 100644 --- a/packages/server/marketplaces/chatflows/CSV Agent.json +++ b/packages/server/marketplaces/chatflows/CSV Agent.json @@ -72,7 +72,7 @@ "id": "chatOpenAI_0", "label": "ChatOpenAI", "name": "chatOpenAI", - "version": 4, + "version": 5, "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], "category": "Chat Models", @@ -218,6 +218,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -238,7 +271,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Chat with a Podcast.json b/packages/server/marketplaces/chatflows/Chat with a Podcast.json index b3b3a4a1..6d0344a3 100644 --- a/packages/server/marketplaces/chatflows/Chat with a Podcast.json +++ b/packages/server/marketplaces/chatflows/Chat with a Podcast.json @@ -196,7 +196,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -349,6 +349,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -370,7 +403,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json index 4f0f7f5f..cbdc4634 100644 --- a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json +++ b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json @@ -217,7 +217,7 @@ "id": "chatOpenAI_0", "label": "ChatOpenAI", "name": "chatOpenAI", - "version": 4, + "version": 5, "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], "category": "Chat Models", @@ -363,6 +363,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -383,7 +416,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Conversational Agent.json b/packages/server/marketplaces/chatflows/Conversational Agent.json index c39b4d65..d07047d6 100644 --- a/packages/server/marketplaces/chatflows/Conversational Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Agent.json @@ -158,7 +158,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -305,6 +305,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -325,7 +358,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json index 1de16cd8..72ac467e 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json @@ -489,7 +489,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -642,6 +642,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -663,7 +696,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index 08e60afc..df3d1389 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -371,7 +371,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -524,6 +524,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -545,7 +578,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json index 7f33ca48..62c72595 100644 --- a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json +++ b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json @@ -378,7 +378,7 @@ "id": "chatOpenAI_0", "label": "ChatOpenAI", "name": "chatOpenAI", - "version": 4, + "version": 5, "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], "category": "Chat Models", @@ -524,6 +524,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -544,7 +577,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/IfElse.json b/packages/server/marketplaces/chatflows/IfElse.json index 7516b19f..cdee6d1d 100644 --- a/packages/server/marketplaces/chatflows/IfElse.json +++ b/packages/server/marketplaces/chatflows/IfElse.json @@ -910,7 +910,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -1063,6 +1063,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -1084,7 +1117,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Image Generation.json b/packages/server/marketplaces/chatflows/Image Generation.json index b97682bd..f798b5a3 100644 --- a/packages/server/marketplaces/chatflows/Image Generation.json +++ b/packages/server/marketplaces/chatflows/Image Generation.json @@ -454,7 +454,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -607,6 +607,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -628,7 +661,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Input Moderation.json b/packages/server/marketplaces/chatflows/Input Moderation.json index efc59707..e35a481d 100644 --- a/packages/server/marketplaces/chatflows/Input Moderation.json +++ b/packages/server/marketplaces/chatflows/Input Moderation.json @@ -166,7 +166,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -319,6 +319,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -340,7 +373,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/List Output Parser.json b/packages/server/marketplaces/chatflows/List Output Parser.json index 46b8c058..5e8602f0 100644 --- a/packages/server/marketplaces/chatflows/List Output Parser.json +++ b/packages/server/marketplaces/chatflows/List Output Parser.json @@ -225,7 +225,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -378,6 +378,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -399,7 +432,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index f9ff3d0f..bc3b8a76 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -508,7 +508,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -661,6 +661,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -682,7 +715,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index b12a6be8..147a8cf6 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -455,7 +455,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -608,6 +608,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -629,7 +662,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json index 12917939..41cd9b17 100644 --- a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json @@ -280,7 +280,7 @@ "id": "chatOpenAI_0", "label": "ChatOpenAI", "name": "chatOpenAI", - "version": 4, + "version": 5, "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], "category": "Chat Models", @@ -426,6 +426,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -446,7 +479,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json index d2d345a3..8f762ca9 100644 --- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json @@ -390,7 +390,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -543,6 +543,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -564,7 +597,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json index 82b6d2d6..db17df54 100644 --- a/packages/server/marketplaces/chatflows/Multiple VectorDB.json +++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json @@ -487,7 +487,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -640,6 +640,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -661,7 +694,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -1013,7 +1048,7 @@ "data": { "id": "chatOpenAI_1", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -1166,6 +1201,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_1-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" } ], "inputAnchors": [ @@ -1187,7 +1255,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -1219,7 +1289,7 @@ "data": { "id": "chatOpenAI_2", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -1372,6 +1442,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_2-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_2-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_2-input-imageResolution-options" } ], "inputAnchors": [ @@ -1393,7 +1496,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/OpenAI Agent.json b/packages/server/marketplaces/chatflows/OpenAI Agent.json index 3792e474..f405640c 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Agent.json +++ b/packages/server/marketplaces/chatflows/OpenAI Agent.json @@ -281,7 +281,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -428,6 +428,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -448,7 +481,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json index a0664390..bb0c284f 100644 --- a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json +++ b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json @@ -429,7 +429,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -582,6 +582,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -603,7 +636,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -635,7 +670,7 @@ "data": { "id": "chatOpenAI_1", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -788,6 +823,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_1-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" } ], "inputAnchors": [ @@ -809,7 +877,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/ReAct Agent.json b/packages/server/marketplaces/chatflows/ReAct Agent.json index a8c55b40..a4989c47 100644 --- a/packages/server/marketplaces/chatflows/ReAct Agent.json +++ b/packages/server/marketplaces/chatflows/ReAct Agent.json @@ -206,7 +206,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -359,6 +359,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -380,7 +413,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/SQL DB Chain.json b/packages/server/marketplaces/chatflows/SQL DB Chain.json index 29ddeb7d..debe4edc 100644 --- a/packages/server/marketplaces/chatflows/SQL DB Chain.json +++ b/packages/server/marketplaces/chatflows/SQL DB Chain.json @@ -15,7 +15,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -162,6 +162,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -182,7 +215,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/SQL Prompt.json b/packages/server/marketplaces/chatflows/SQL Prompt.json index cbad6a97..cfdb317a 100644 --- a/packages/server/marketplaces/chatflows/SQL Prompt.json +++ b/packages/server/marketplaces/chatflows/SQL Prompt.json @@ -175,7 +175,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -328,6 +328,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -349,7 +382,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -381,7 +416,7 @@ "data": { "id": "chatOpenAI_1", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -534,6 +569,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_1-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" } ], "inputAnchors": [ @@ -555,7 +623,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -1313,7 +1383,7 @@ "data": { "id": "chatOpenAI_2", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -1466,6 +1536,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_2-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_2-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_2-input-imageResolution-options" } ], "inputAnchors": [ @@ -1487,7 +1590,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json index ea9a8d39..d3688e0e 100644 --- a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json +++ b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json @@ -16,7 +16,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -169,6 +169,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -190,7 +223,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Structured Output Parser.json b/packages/server/marketplaces/chatflows/Structured Output Parser.json index f23a3011..b1978cc1 100644 --- a/packages/server/marketplaces/chatflows/Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Structured Output Parser.json @@ -16,7 +16,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -169,6 +169,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -190,7 +223,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Translator.json b/packages/server/marketplaces/chatflows/Translator.json index cc83622b..0155ca46 100644 --- a/packages/server/marketplaces/chatflows/Translator.json +++ b/packages/server/marketplaces/chatflows/Translator.json @@ -84,7 +84,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -231,6 +231,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -251,7 +284,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json index 9ddf595f..d8b7d9f6 100644 --- a/packages/server/marketplaces/chatflows/WebBrowser.json +++ b/packages/server/marketplaces/chatflows/WebBrowser.json @@ -127,7 +127,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -274,6 +274,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -294,7 +327,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { @@ -433,7 +468,7 @@ "data": { "id": "chatOpenAI_1", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], @@ -580,6 +615,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_1-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" } ], "inputAnchors": [ @@ -600,7 +668,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index 126143b8..5ca29ee9 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -394,7 +394,7 @@ "data": { "id": "chatOpenAI_0", "label": "ChatOpenAI", - "version": 4, + "version": 5, "name": "chatOpenAI", "type": "ChatOpenAI", "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -547,6 +547,39 @@ "optional": true, "additionalParams": true, "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" } ], "inputAnchors": [ @@ -568,7 +601,9 @@ "presencePenalty": "", "timeout": "", "basepath": "", - "baseOptions": "" + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" }, "outputAnchors": [ { From 7e84268f0d4f1cdfcafb5a9b4897bfed3c1bac38 Mon Sep 17 00:00:00 2001 From: Ilango Date: Fri, 23 Feb 2024 15:59:14 +0530 Subject: [PATCH 59/62] Add content-disposition package for handling content disposition response header --- packages/server/package.json | 2 ++ packages/server/src/index.ts | 45 ++++++++++++++++++++---------------- 2 files changed, 27 insertions(+), 20 deletions(-) diff --git a/packages/server/package.json b/packages/server/package.json index 3698a216..cff39a89 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -48,6 +48,7 @@ "@oclif/core": "^1.13.10", "async-mutex": "^0.4.0", "axios": "1.6.2", + "content-disposition": "0.5.4", "cors": "^2.8.5", "crypto-js": "^4.1.1", "dotenv": "^16.0.0", @@ -70,6 +71,7 @@ "winston": "^3.9.0" }, "devDependencies": { + "@types/content-disposition": "0.5.8", "@types/cors": "^2.8.12", "@types/crypto-js": "^4.1.1", "@types/multer": "^1.4.7", diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 824a217f..938a2351 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -5,6 +5,7 @@ import cors from 'cors' import http from 'http' import * as fs from 'fs' import basicAuth from 'express-basic-auth' +import contentDisposition from 'content-disposition' import { Server } from 'socket.io' import logger from './utils/logger' import { expressRequestLogger } from './utils/logger' @@ -1143,7 +1144,7 @@ export class App { if (!(filePath.includes('.flowise') && filePath.includes('openai-assistant'))) return res.status(500).send(`Invalid file path`) if (fs.existsSync(filePath)) { - res.setHeader('Content-Disposition', 'attachment; filename=' + path.basename(filePath)) + res.setHeader('Content-Disposition', contentDisposition(path.basename(filePath))) streamFileToUser(res, filePath) } else { return res.status(404).send(`File ${req.body.fileName} not found`) @@ -1158,27 +1159,31 @@ export class App { // stream uploaded image this.app.get('/api/v1/get-upload-file', async (req: Request, res: Response) => { - if (!req.query.chatflowId || !req.query.chatId || !req.query.fileName) { + try { + if (!req.query.chatflowId || !req.query.chatId || !req.query.fileName) { + return res.status(500).send(`Invalid file path`) + } + const chatflowId = req.query.chatflowId as string + const chatId = req.query.chatId as string + const fileName = req.query.fileName as string + + const filePath = path.join(getStoragePath(), chatflowId, chatId, fileName) + //raise error if file path is not absolute + if (!path.isAbsolute(filePath)) return res.status(500).send(`Invalid file path`) + //raise error if file path contains '..' 
+ if (filePath.includes('..')) return res.status(500).send(`Invalid file path`) + //only return from the storage folder + if (!filePath.startsWith(getStoragePath())) return res.status(500).send(`Invalid file path`) + + if (fs.existsSync(filePath)) { + res.setHeader('Content-Disposition', contentDisposition(path.basename(filePath))) + streamFileToUser(res, filePath) + } else { + return res.status(404).send(`File ${fileName} not found`) + } + } catch (error) { return res.status(500).send(`Invalid file path`) } - const chatflowId = req.query.chatflowId as string - const chatId = req.query.chatId as string - const fileName = req.query.fileName as string - - const filePath = path.join(getStoragePath(), chatflowId, chatId, fileName) - //raise error if file path is not absolute - if (!path.isAbsolute(filePath)) return res.status(500).send(`Invalid file path`) - //raise error if file path contains '..' - if (filePath.includes('..')) return res.status(500).send(`Invalid file path`) - //only return from the storage folder - if (!filePath.startsWith(getStoragePath())) return res.status(500).send(`Invalid file path`) - - if (fs.existsSync(filePath)) { - res.setHeader('Content-Disposition', 'attachment; filename=' + path.basename(filePath)) - streamFileToUser(res, filePath) - } else { - return res.status(404).send(`File ${fileName} not found`) - } }) // ---------------------------------------- From e55975ec7f9db17a4e13ff4f72b0cbe2a39ac208 Mon Sep 17 00:00:00 2001 From: Ilango Date: Fri, 23 Feb 2024 18:40:58 +0530 Subject: [PATCH 60/62] Revert useEffect in async dropdown and input components --- packages/ui/src/ui-component/dropdown/AsyncDropdown.js | 4 ---- packages/ui/src/ui-component/input/Input.js | 6 ------ 2 files changed, 10 deletions(-) diff --git a/packages/ui/src/ui-component/dropdown/AsyncDropdown.js b/packages/ui/src/ui-component/dropdown/AsyncDropdown.js index b98410a8..84c11e25 100644 --- a/packages/ui/src/ui-component/dropdown/AsyncDropdown.js +++ b/packages/ui/src/ui-component/dropdown/AsyncDropdown.js @@ -107,10 +107,6 @@ export const AsyncDropdown = ({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [credentialNames]) - useEffect(() => { - setInternalValue(value) - }, [value]) - return ( <> { - if (value) { - setMyValue(value) - } - }, [value]) - return ( <> {inputParam.name === 'note' ? 
(

From b884e93ba29f5929c298468f72d8e6e88d3b124d Mon Sep 17 00:00:00 2001
From: Henry
Date: Sat, 24 Feb 2024 15:39:07 +0800
Subject: [PATCH 61/62] fix speech to text dialog credential, fix url changing
 when clicking settings menu item

---
 .../ConversationalAgent.ts | 1 -
 .../agents/MRKLAgentChat/MRKLAgentChat.ts | 1 -
 .../ui-component/dialog/SpeechToTextDialog.js | 9 ++-
 .../ui-component/dropdown/AsyncDropdown.js | 2 +-
 packages/ui/src/views/settings/index.js | 75 +++++++++++++++----
 5 files changed, 70 insertions(+), 18 deletions(-)

diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
index b2019a31..db6b37c6 100644
--- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
+++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
@@ -95,7 +95,6 @@ class ConversationalAgent_Agents implements INode {
             { sessionId: this.sessionId, chatId: options.chatId, input },
             options.chatHistory
         )
-        // injectAgentExecutorNodeData(executor, nodeData, options)

         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)

diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts
index 0d3b612c..d59de540 100644
--- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts
+++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts
@@ -99,7 +99,6 @@ class MRKLAgentChat_Agents implements INode {
             tools,
             verbose: process.env.DEBUG === 'true'
         })
-        // injectLcAgentExecutorNodeData(executor, nodeData, options)

         const callbacks = await additionalCallbacks(nodeData, options)

diff --git a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js
index 7f30d14b..1366ea1a 100644
--- a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js
+++ b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js
@@ -272,8 +272,9 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => {
{inputParam.type === 'credential' && ( { )} - + Save diff --git a/packages/ui/src/ui-component/dropdown/AsyncDropdown.js b/packages/ui/src/ui-component/dropdown/AsyncDropdown.js index 84c11e25..b24fa02b 100644 --- a/packages/ui/src/ui-component/dropdown/AsyncDropdown.js +++ b/packages/ui/src/ui-component/dropdown/AsyncDropdown.js @@ -105,7 +105,7 @@ export const AsyncDropdown = ({ })() // eslint-disable-next-line react-hooks/exhaustive-deps - }, [credentialNames]) + }, []) return ( <> diff --git a/packages/ui/src/views/settings/index.js b/packages/ui/src/views/settings/index.js index 8d76cc0d..f7019b68 100644 --- a/packages/ui/src/views/settings/index.js +++ b/packages/ui/src/views/settings/index.js @@ -1,9 +1,11 @@ -import { useState, useEffect } from 'react' +import { useState, useEffect, useRef } from 'react' import PropTypes from 'prop-types' +import { useSelector } from 'react-redux' // material-ui import { useTheme } from '@mui/material/styles' -import { Box, List, Paper, Popper, ClickAwayListener } from '@mui/material' +import { ListItemButton, ListItemIcon, ListItemText, Typography, Box, List, Paper, Popper, ClickAwayListener } from '@mui/material' +import FiberManualRecordIcon from '@mui/icons-material/FiberManualRecord' // third-party import PerfectScrollbar from 'react-perfect-scrollbar' @@ -11,8 +13,6 @@ import PerfectScrollbar from 'react-perfect-scrollbar' // project imports import MainCard from 'ui-component/cards/MainCard' import Transitions from 'ui-component/extended/Transitions' -import NavItem from 'layout/MainLayout/Sidebar/MenuList/NavItem' - import settings from 'menu-items/settings' // ==============================|| SETTINGS ||============================== // @@ -20,9 +20,26 @@ import settings from 'menu-items/settings' const Settings = ({ chatflow, isSettingsOpen, anchorEl, onSettingsItemClick, onUploadFile, onClose }) => { const theme = useTheme() const [settingsMenu, setSettingsMenu] = useState([]) - + const customization = useSelector((state) => state.customization) + const inputFile = useRef(null) const [open, setOpen] = useState(false) + const handleFileUpload = (e) => { + if (!e.target.files) return + + const file = e.target.files[0] + + const reader = new FileReader() + reader.onload = (evt) => { + if (!evt?.target?.result) { + return + } + const { result } = evt.target + onUploadFile(result) + } + reader.readAsText(file) + } + useEffect(() => { if (chatflow && !chatflow.id) { const settingsMenu = settings.children.filter((menu) => menu.id === 'loadChatflow') @@ -39,16 +56,40 @@ const Settings = ({ chatflow, isSettingsOpen, anchorEl, onSettingsItemClick, onU // settings list items const items = settingsMenu.map((menu) => { - return ( - onSettingsItemClick(id)} - onUploadFile={onUploadFile} + const Icon = menu.icon + const itemIcon = menu?.icon ? ( + + ) : ( + id === menu?.id) > -1 ? 8 : 6, + height: customization.isOpen.findIndex((id) => id === menu?.id) > -1 ? 8 : 6 + }} + fontSize={level > 0 ? 
'inherit' : 'medium'} />
        )
        return (
            {
                if (menu.id === 'loadChatflow' && inputFile) {
                    inputFile?.current.click()
                } else {
                    onSettingsItemClick(menu.id)
                }
            }}
        >
            {itemIcon}
            {menu.title}} />
        )
    })

    return (
@@ -82,6 +123,14 @@ const Settings = ({ chatflow, isSettingsOpen, anchorEl, onSettingsItemClick, onU
                                        {items}
+ handleFileUpload(e)}
+ />

From 68ac61c95fd877d9d280eff9047786b2243bb356 Mon Sep 17 00:00:00 2001
From: Henry
Date: Mon, 26 Feb 2024 19:14:13 +0800
Subject: [PATCH 62/62] fix speech to text dialog state

---
 packages/ui/src/ui-component/dialog/SpeechToTextDialog.js | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js
index 1366ea1a..489d4335 100644
--- a/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js
+++ b/packages/ui/src/ui-component/dialog/SpeechToTextDialog.js
@@ -108,6 +108,7 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => {
     const [selectedProvider, setSelectedProvider] = useState('none')

     const onSave = async () => {
+        const speechToText = setValue(true, selectedProvider, 'status')
         try {
             const saveResp = await chatflowsApi.updateChatflow(dialogProps.chatflow.id, {
                 speechToText: JSON.stringify(speechToText)
@@ -165,11 +166,11 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => {
             })
         }
         setSpeechToText(newVal)
+        return newVal
     }

     const handleProviderChange = (event) => {
         setSelectedProvider(event.target.value)
-        setValue(true, event.target.value, 'status')
     }

     useEffect(() => {
@@ -179,7 +180,7 @@ const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => {
                 let selectedProvider = 'none'
                 Object.keys(speechToTextProviders).forEach((key) => {
                     const providerConfig = speechToText[key]
-                    if (providerConfig.status) {
+                    if (providerConfig && providerConfig.status) {
                         selectedProvider = key
                     }
                 })
                 setSelectedProvider(selectedProvider)
                 setSpeechToText(speechToText)
             } catch (e) {
                 setSpeechToText({})
+                setSelectedProvider('none')
                 console.error(e)
             }
         }

         return () => {
             setSpeechToText({})
+            setSelectedProvider('none')
         }
     }, [dialogProps])
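
A note on the marketplace template hunks earlier in this series (WebBrowser.json, WebPage QnA.json, and the rest): each one applies the same mechanical edit, bumping the embedded ChatOpenAI node to version 5, appending the allowImageUploads and imageResolution input params, and seeding matching keys in the node's inputs map. A hypothetical sweep script for that kind of repeated edit is sketched below. It is not part of this patch series, and it assumes each template keeps a top-level nodes array whose entries carry data.inputParams and data.inputs as the hunks show; the allowImageUploads boolean would be appended the same way.

import * as fs from 'fs'
import path from 'path'

// Hypothetical sweep: push the new ChatOpenAI v5 input into every
// marketplace template instead of editing each JSON file by hand.
const IMAGE_RESOLUTION_PARAM = {
    label: 'Image Resolution',
    description: 'This parameter controls the resolution in which the model views the image.',
    name: 'imageResolution',
    type: 'options',
    options: [
        { label: 'Low', name: 'low' },
        { label: 'High', name: 'high' },
        { label: 'Auto', name: 'auto' }
    ],
    default: 'low',
    optional: false,
    additionalParams: true
}

const dir = 'packages/server/marketplaces/chatflows'
for (const file of fs.readdirSync(dir).filter((f) => f.endsWith('.json'))) {
    const templatePath = path.join(dir, file)
    const flow = JSON.parse(fs.readFileSync(templatePath, 'utf8'))
    for (const node of flow.nodes ?? []) {
        if (node.data?.name !== 'chatOpenAI') continue
        node.data.version = 5
        // param ids follow the <nodeId>-input-<name>-<type> convention seen in the hunks
        node.data.inputParams.push({ ...IMAGE_RESOLUTION_PARAM, id: `${node.data.id}-input-imageResolution-options` })
        node.data.inputs.imageResolution = 'low'
    }
    fs.writeFileSync(templatePath, JSON.stringify(flow, null, 4) + '\n')
}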
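
On the content-disposition change to packages/server/src/index.ts in PATCH 59: concatenating the raw basename into the header yields an unparseable, and potentially injectable, value once a filename contains quotes, semicolons, control characters, or non-ASCII text. The content-disposition package serializes the header per RFC 6266, quoting an ASCII fallback and adding an encoded filename* parameter when needed. A minimal illustration, with outputs matching the package's documented behaviour:

import contentDisposition from 'content-disposition'

// Plain ASCII names are quoted, closing the 'filename=a;b.txt' ambiguity
// that naive concatenation leaves open:
console.log(contentDisposition('report.pdf'))
// -> attachment; filename="report.pdf"

// Non-ASCII names gain an RFC 5987 extended parameter that the old
// string concatenation could never express:
console.log(contentDisposition('€ rates.pdf'))
// -> attachment; filename="? rates.pdf"; filename*=UTF-8''%E2%82%AC%20rates.pdf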
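
The hardened /api/v1/get-upload-file route in the same commit layers three rejections: non-absolute paths, any '..' substring, and anything outside getStoragePath(). An equivalent formulation, sketched here purely for comparison, resolves the path first so that '..' segments are collapsed before a single prefix check; storageRoot below is a stand-in for the real getStoragePath().

import path from 'path'

const storageRoot = path.resolve('/data/storage') // stand-in for getStoragePath()

// Collapse '..' segments, then require the result to stay under the root.
function resolveUploadPath(chatflowId: string, chatId: string, fileName: string): string {
    const resolved = path.resolve(storageRoot, chatflowId, chatId, fileName)
    // comparing against root + path.sep also rejects sibling directories
    // such as /data/storage-old
    if (resolved !== storageRoot && !resolved.startsWith(storageRoot + path.sep)) {
        throw new Error('Invalid file path')
    }
    return resolved
}

// resolveUploadPath('flow1', 'chat1', 'image.png')           -> served
// resolveUploadPath('flow1', 'chat1', '../../../etc/passwd') -> throws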
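
Commit 60 removes effects of the shape useEffect(() => setInternalValue(value), [value]) from AsyncDropdown and Input. The commit message gives no rationale, but mirroring a prop into local state and then re-syncing it in an effect is a known way to clobber in-flight edits whenever the parent re-renders. The two conventional alternatives are sketched below with made-up component names; neither is claimed to be what the Flowise components themselves do.

import { useState } from 'react'

// Alternative A: fully controlled. The parent owns the value and no local
// copy exists, so nothing can fall out of sync.
export const ControlledInput = ({ value, onChange }) => (
    <input value={value} onChange={(event) => onChange(event.target.value)} />
)

// Alternative B: uncontrolled after mount. The prop only seeds the draft;
// callers remount with a different key prop when they need a reset.
export const DraftInput = ({ initialValue, onCommit }) => {
    const [draft, setDraft] = useState(initialValue)
    return <input value={draft} onChange={(event) => setDraft(event.target.value)} onBlur={() => onCommit(draft)} />
}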
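
Commit 62 has setValue return the object it just handed to setSpeechToText, and onSave consumes that return value. This works around React's asynchronous state updates: within the same event handler, the speechToText binding still holds the previous render's value, so serializing it would drop the update queued one line earlier. (The same commit also null-guards providerConfig before reading .status, since a provider absent from the saved config would otherwise throw.) The pitfall and the fix, distilled with hypothetical names:

import { useState } from 'react'

export const SaveButton = ({ api }) => {
    const [config, setConfig] = useState({})

    // Buggy shape: config here is still the current render's value, so the
    // request body misses the status flag queued one line above.
    const saveStale = async () => {
        setConfig({ ...config, status: true })
        await api.update(JSON.stringify(config)) // serializes the stale value
    }

    // Shape used in the patch: compute the next value once, store it, and
    // serialize that same object.
    const saveFresh = async () => {
        const next = { ...config, status: true }
        setConfig(next)
        await api.update(JSON.stringify(next)) // serializes the fresh value
    }

    void saveStale // kept only for side-by-side comparison
    return <button onClick={saveFresh}>Save</button>
}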