Compare commits


No commits in common. "main" and "flowise-components@1.6.2" have entirely different histories.

784 changed files with 27425 additions and 91830 deletions

@@ -5,6 +5,3 @@ build
**/node_modules
**/build
**/dist
packages/server/.env
packages/ui/.env

@@ -27,8 +27,7 @@ If applicable, add screenshots to help explain your problem.
If applicable, add exported flow in order to help replicating the problem.
**Setup**
- Installation [e.g. docker, `npx flowise start`, `pnpm start`]
- Installation [e.g. docker, `npx flowise start`, `yarn start`]
- Flowise Version [e.g. 1.2.11]
- OS: [e.g. macOS, Windows, Linux]
- Browser [e.g. chrome, safari]

@@ -11,14 +11,14 @@ jobs:
permissions:
contents: write
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Show PR info
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo The PR #${{ github.event.pull_request.number }} was merged on main branch!
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v3
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.AUTOSYNC_TOKEN }}
repository: ${{ secrets.AUTOSYNC_CH_URL }}
@@ -28,6 +28,6 @@ jobs:
"ref": "${{ github.ref }}",
"prNumber": "${{ github.event.pull_request.number }}",
"prTitle": "${{ github.event.pull_request.title }}",
"prDescription": "",
"prDescription": "${{ github.event.pull_request.description }}",
"sha": "${{ github.sha }}"
}
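Both sides post the same `client_payload` shape. GitHub's pull-request webhook exposes the PR text as `body`, not `description`, so the older `${{ github.event.pull_request.description }}` expression likely resolved to an empty string anyway, which is presumably why the main side hardcodes `""`. For reference, the same dispatch can be exercised by hand against the REST API; the event type below is an assumption, since the `event-type` field falls outside this hunk:

```bash
# Hypothetical manual dispatch: event_type and OWNER/REPO are placeholders;
# the real values come from the workflow's event-type field and repo secrets.
curl -X POST \
    -H "Accept: application/vnd.github+json" \
    -H "Authorization: Bearer $AUTOSYNC_TOKEN" \
    https://api.github.com/repos/OWNER/REPO/dispatches \
    -d '{"event_type": "pr-merged", "client_payload": {"ref": "refs/heads/main", "prNumber": "123", "prTitle": "Example", "prDescription": "", "sha": "0000000"}}'
```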

@@ -23,7 +23,7 @@ jobs:
run: |
echo Autosync a single commit with id: ${{ github.sha }} from openSource main branch towards cloud hosted version.
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v3
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.AUTOSYNC_TOKEN }}
repository: ${{ secrets.AUTOSYNC_CH_URL }}
@@ -32,5 +32,5 @@ jobs:
{
"ref": "${{ github.ref }}",
"sha": "${{ github.sha }}",
"commitMessage": "${{ github.event.commits[0].id }}"
"commitMessage": "${{ github.event.commits[0].message }}"
}

@@ -1,43 +0,0 @@
name: Docker Image CI
on:
workflow_dispatch:
inputs:
node_version:
description: 'Node.js version to build this image with.'
type: choice
required: true
default: '20'
options:
- '20'
tag_version:
description: 'Tag version of the image to be pushed.'
type: string
required: true
default: 'latest'
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4.1.1
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.0.0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5.3.0
with:
context: .
file: ./docker/Dockerfile
build-args: |
NODE_VERSION=${{github.event.inputs.node_version}}
platforms: linux/amd64,linux/arm64
push: true
tags: flowiseai/flowise:${{github.event.inputs.tag_version}}
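This manually triggered workflow (removed on the 1.6.2 side) takes two inputs. As a sketch, assuming the file was saved as `.github/workflows/docker-image.yml`, it could be kicked off from the CLI:

```bash
# The workflow file name is an assumption; inputs mirror node_version and tag_version above
gh workflow run docker-image.yml -f node_version=20 -f tag_version=latest
```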

@@ -1,13 +1,17 @@
name: Node CI
on:
push:
branches:
- main
pull_request:
branches:
- '*'
permissions:
contents: read
jobs:
build:
strategy:
@@ -18,32 +22,16 @@ jobs:
env:
PUPPETEER_SKIP_DOWNLOAD: true
steps:
- uses: actions/checkout@v4
- uses: pnpm/action-setup@v3
with:
version: 9.0.4
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
check-latest: false
cache: 'pnpm'
- run: npm i -g pnpm
- run: pnpm install
- run: ./node_modules/.bin/cypress install
- run: pnpm lint
- run: pnpm build
- name: Install dependencies
uses: cypress-io/github-action@v6
with:
working-directory: ./
runTests: false
- name: Cypress test
uses: cypress-io/github-action@v6
with:
install: false
working-directory: packages/server
start: pnpm start
wait-on: 'http://localhost:3000'
wait-on-timeout: 120
browser: chrome
- run: npm i -g yarn
- run: yarn install --ignore-engines
- run: yarn lint
- run: yarn build
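On the 1.6.2 side the pipeline reduces to the four commands above, so it can be reproduced locally before pushing:

```bash
npm i -g yarn
yarn install --ignore-engines   # same flag the CI step uses
yarn lint
yarn build
```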

.gitignore
@@ -11,9 +11,6 @@
**/logs
**/*.log
## pnpm
.pnpm-store/
## build
**/dist
**/build
@@ -47,60 +44,3 @@
## compressed
**/*.tgz
## vscode
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets
# Local History for Visual Studio Code
.history/
## other keys
*.key
*.keys
*.priv
*.rsa
*.key.json
## ssh keys
*.ssh
*.ssh-key
.key-mrc
## Certificate Authority
*.ca
## Certificate
*.crt
## Certificate Sign Request
*.csr
## Certificate
*.der
## Key database file
*.kdb
## OSCP request data
*.org
## PKCS #12
*.p12
## PEM-encoded certificate data
*.pem
## Random number seed
*.rnd
## SSLeay data
*.ssleay
## S/MIME message
*.smime
*.vsix

@@ -1,5 +1,5 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
pnpm quick # prettify
pnpm lint-staged # eslint lint(also include prettify but prettify support more file extensions than eslint, so run prettify first)
yarn quick # prettify
yarn lint-staged # eslint lint(also include prettify but prettify support more file extensions than eslint, so run prettify first)
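To debug a failing hook without committing, the script can be run by hand; a sketch (`lint-staged` only inspects files that are staged):

```bash
git add -A
sh .husky/pre-commit   # runs `yarn quick`, then `yarn lint-staged`
```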

.npmrc
@@ -1,6 +0,0 @@
auto-install-peers = true
strict-peer-dependencies = false
prefer-workspace-packages = true
link-workspace-packages = deep
hoist = true
shamefully-hoist = true
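These pnpm workspace settings exist only on the main side; whether the file is actually in effect there can be sanity-checked with:

```bash
pnpm config get auto-install-peers   # prints "true" when this .npmrc is picked up
```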

.prettierignore
@@ -0,0 +1,3 @@
**/node_modules
**/dist
**/build

.prettierrc.js
@@ -0,0 +1,9 @@
module.exports = {
printWidth: 140,
singleQuote: true,
jsxSingleQuote: true,
trailingComma: 'none',
tabWidth: 4,
semi: false,
endOfLine: 'auto'
}
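The same options were previously inlined under the `prettier` key of package.json (see the package.json diff below). A quick way to verify the config against the tree, using the same glob as the repo's `format` script:

```bash
# --check reports files that would be rewritten, without touching them
npx prettier --check "**/*.{ts,tsx,md}"
```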

@@ -2,7 +2,7 @@
# 贡献者公约行为准则
[English](../CODE_OF_CONDUCT.md) | 中文
[English](<./CODE_OF_CONDUCT.md>) | 中文
## 我们的承诺
@@ -44,6 +44,6 @@
## 归属
该行为准则的内容来自于[贡献者公约](http://contributor-covenant.org/)1.4 版,可在[http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4)上获取。
该行为准则的内容来自于[贡献者公约](http://contributor-covenant.org/)1.4版,可在[http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4)上获取。
[主页]: http://contributor-covenant.org

@@ -1,6 +1,6 @@
# Contributor Covenant Code of Conduct
English | [中文](./i18n/CODE_OF_CONDUCT-ZH.md)
English | [中文](<./CODE_OF_CONDUCT-ZH.md>)
## Our Pledge

CONTRIBUTING-ZH.md
@@ -0,0 +1,159 @@
<!-- markdownlint-disable MD030 -->
# 贡献给 Flowise
[English](./CONTRIBUTING.md) | 中文
我们欢迎任何形式的贡献。
## ⭐ 点赞
点赞并分享[Github 仓库](https://github.com/FlowiseAI/Flowise)。
## 🙋 问题和回答
在[问题和回答](https://github.com/FlowiseAI/Flowise/discussions/categories/q-a)部分搜索任何问题,如果找不到,可以毫不犹豫地创建一个。这可能会帮助到其他有类似问题的人。
## 🙌 分享 Chatflow
是的!分享你如何使用 Flowise 是一种贡献方式。将你的 Chatflow 导出为 JSON附上截图并在[展示和分享](https://github.com/FlowiseAI/Flowise/discussions/categories/show-and-tell)部分分享。
## 💡 想法
欢迎各种想法,如新功能、应用集成和区块链网络。在[想法](https://github.com/FlowiseAI/Flowise/discussions/categories/ideas)部分提交。
## 🐞 报告错误
发现问题了吗?[报告它](https://github.com/FlowiseAI/Flowise/issues/new/choose)。
## 👨‍💻 贡献代码
不确定要贡献什么?一些想法:
- 从 `packages/components` 创建新组件
- 更新现有组件,如扩展功能、修复错误
- 添加新的 Chatflow 想法
### 开发人员
Flowise 在一个单一的单体存储库中有 3 个不同的模块。
- `server`:用于提供 API 逻辑的 Node 后端
- `ui`React 前端
- `components`Langchain/LlamaIndex 组件
#### 先决条件
- 安装 [Yarn v1](https://classic.yarnpkg.com/en/docs/install)
```bash
npm i -g yarn
```
#### 逐步指南
1. Fork 官方的[Flowise Github 仓库](https://github.com/FlowiseAI/Flowise)。
2. 克隆你 fork 的存储库。
3. 创建一个新的分支,参考[指南](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository)。命名约定:
- 对于功能分支:`feature/<你的新功能>`
- 对于 bug 修复分支:`bugfix/<你的新bug修复>`。
4. 切换到新创建的分支。
5. 进入存储库文件夹
```bash
cd Flowise
```
6. 安装所有模块的依赖项:
```bash
yarn install
```
7. 构建所有代码:
```bash
yarn build
```
8. 在[http://localhost:3000](http://localhost:3000)上启动应用程序
```bash
yarn start
```
9. 开发时:
- 在`packages/ui`中创建`.env`文件并指定`PORT`(参考`.env.example`
- 在`packages/server`中创建`.env`文件并指定`PORT`(参考`.env.example`
- 运行
```bash
yarn dev
```
对`packages/ui`或`packages/server`进行的任何更改都将反映在[http://localhost:8080](http://localhost:8080)上
对于`packages/components`中进行的更改,再次运行`yarn build`以应用更改。
10. 做完所有的更改后,运行以下命令来确保在生产环境中一切正常:
```bash
yarn build
```
```bash
yarn start
```
11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/master) 的分叉分支上提交 Pull Request。
## 🌱 环境变量
Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。阅读[更多信息](https://docs.flowiseai.com/environment-variables)
| 变量名 | 描述 | 类型 | 默认值 |
| --------------------------- | ------------------------------------------------------ | ----------------------------------------------- | ----------------------------------- |
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
| FLOWISE_USERNAME | 登录用户名 | 字符串 | |
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb |
| DEBUG | 打印组件的日志 | 布尔值 | |
| BLOB_STORAGE_PATH | 存储位置 | 字符串 | `your-home-dir/.flowise/storage` |
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
| APIKEY_PATH | 存储 API 密钥的位置 | 字符串 | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
| TOOL_FUNCTION_EXTERNAL_DEP | 用于工具函数的外部模块 | 字符串 | |
| DATABASE_TYPE | 存储 flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | 数据库保存的位置(当 DATABASE_TYPE 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
| DATABASE_HOST | 主机 URL 或 IP 地址(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_PORT | 数据库端口(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_USERNAME | 数据库用户名(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_PASSWORD | 数据库密码(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |
您也可以在使用 `npx` 时指定环境变量。例如:
```
npx flowise start --PORT=3000 --DEBUG=true
```
## 📖 贡献文档
[Flowise 文档](https://github.com/FlowiseAI/FlowiseDocs)
## 🏷️ Pull Request 流程
当您打开一个 Pull Request 时FlowiseAI 团队的成员将自动收到通知/指派。您也可以在 [Discord](https://discord.gg/jbaHfsRVBW) 上联系我们。
##

@@ -2,7 +2,7 @@
# Contributing to Flowise
English | [中文](./i18n/CONTRIBUTING-ZH.md)
English | [中文](./CONTRIBUTING-ZH.md)
We appreciate any form of contributions.
@@ -44,9 +44,9 @@ Flowise has 3 different modules in a single mono repository.
#### Prerequisite
- Install [PNPM](https://pnpm.io/installation). The project is configured to use pnpm v9.
- Install [Yarn v1](https://classic.yarnpkg.com/en/docs/install)
```bash
npm i -g pnpm
npm i -g yarn
```
#### Step by step
@@ -71,45 +71,45 @@ Flowise has 3 different modules in a single mono repository.
6. Install all dependencies of all modules:
```bash
pnpm install
yarn install
```
7. Build all the code:
```bash
pnpm build
yarn build
```
8. Start the app on [http://localhost:3000](http://localhost:3000)
```bash
pnpm start
yarn start
```
9. For development:
- Create `.env` file and specify the `VITE_PORT` (refer to `.env.example`) in `packages/ui`
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/ui`
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server`
- Run
```bash
pnpm dev
yarn dev
```
Any changes made in `packages/ui` or `packages/server` will be reflected on [http://localhost:8080](http://localhost:8080)
For changes made in `packages/components`, run `pnpm build` again to pickup the changes.
For changes made in `packages/components`, run `yarn build` again to pickup the changes.
10. After making all the changes, run
```bash
pnpm build
yarn build
```
and
```bash
pnpm start
yarn start
```
to make sure everything works fine in production.
@@ -120,41 +120,33 @@ Flowise has 3 different modules in a single mono repository.
Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)
| Variable | Description | Type | Default |
| ---------------------------- | ----------------------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DISABLE_CHATFLOW_REUSE | Forces the creation of a new ChatFlow for each call instead of reusing existing ones from cache | Boolean | |
| DEBUG | Print logs from components | Boolean | |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
| APIKEY_PATH | Location where api keys are saved | String | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String |
| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean |
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local` | `local` |
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
| S3_STORAGE_REGION | Region for S3 bucket | String | |
| Variable | Description | Type | Default |
| --------------------------- | ---------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| BLOB_STORAGE_PATH | Location where uploaded files are stored | String | `your-home-dir/.flowise/storage` |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
| APIKEY_PATH | Location where api keys are saved | String | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String |
| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean |
You can also specify the env variables when using `npx`. For example:

@@ -4,7 +4,7 @@
# Run image
# docker run -d -p 3000:3000 flowise
FROM node:20-alpine
FROM node:18-alpine
RUN apk add --update libc6-compat python3 make g++
# needed for pdfjs-dist
RUN apk add --no-cache build-base cairo-dev pango-dev
@@ -12,21 +12,30 @@ RUN apk add --no-cache build-base cairo-dev pango-dev
# Install Chromium
RUN apk add --no-cache chromium
#install PNPM globaly
RUN npm install -g pnpm
ENV PUPPETEER_SKIP_DOWNLOAD=true
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
WORKDIR /usr/src
WORKDIR /usr/src/packages
# Copy root package.json and lockfile
COPY package.json yarn.loc[k] ./
# Copy components package.json
COPY packages/components/package.json ./packages/components/package.json
# Copy ui package.json
COPY packages/ui/package.json ./packages/ui/package.json
# Copy server package.json
COPY packages/server/package.json ./packages/server/package.json
RUN yarn install
# Copy app source
COPY . .
RUN pnpm install
RUN pnpm build
RUN yarn build
EXPOSE 3000
CMD [ "pnpm", "start" ]
CMD [ "yarn", "start" ]

@@ -10,7 +10,7 @@
[![GitHub星图](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub分支](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
[English](../README.md) | 中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md)
[English](./README.md) | 中文
<h3>拖放界面构建定制化的LLM流程</h3>
<a href="https://github.com/FlowiseAI/Flowise">
@@ -44,9 +44,9 @@
1. 进入项目根目录下的 `docker` 文件夹
2. 创建 `.env` 文件并指定 `PORT`(参考 `.env.example`
3. 运行 `docker compose up -d`
3. 运行 `docker-compose up -d`
4. 打开 [http://localhost:3000](http://localhost:3000)
5. 可以通过 `docker compose stop` 停止容器
5. 可以通过 `docker-compose stop` 停止容器
### Docker 镜像
@@ -75,9 +75,9 @@ Flowise 在一个单一的代码库中有 3 个不同的模块。
### 先决条件
- 安装 [PNPM](https://pnpm.io/installation)
- 安装 [Yarn v1](https://classic.yarnpkg.com/en/docs/install)
```bash
npm i -g pnpm
npm i -g yarn
```
### 设置
@@ -97,31 +97,31 @@ Flowise 在一个单一的代码库中有 3 个不同的模块。
3. 安装所有模块的依赖:
```bash
pnpm install
yarn install
```
4. 构建所有代码:
```bash
pnpm build
yarn build
```
5. 启动应用:
```bash
pnpm start
yarn start
```
现在可以在 [http://localhost:3000](http://localhost:3000) 访问应用
6. 用于开发构建:
- 在 `packages/ui` 中创建 `.env` 文件并指定 `VITE_PORT`(参考 `.env.example`
- 在 `packages/ui` 中创建 `.env` 文件并指定 `PORT`(参考 `.env.example`
- 在 `packages/server` 中创建 `.env` 文件并指定 `PORT`(参考 `.env.example`
- 运行
```bash
pnpm dev
yarn dev
```
任何代码更改都会自动重新加载应用程序,访问 [http://localhost:8080](http://localhost:8080)

@@ -10,7 +10,7 @@
[![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
English | [中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)
English | [中文](./README-ZH.md)
<h3>Drag & drop UI to build your customized LLM flow</h3>
<a href="https://github.com/FlowiseAI/Flowise">
@@ -44,9 +44,9 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
1. Go to `docker` folder at the root of the project
2. Copy `.env.example` file, paste it into the same location, and rename to `.env`
3. `docker compose up -d`
3. `docker-compose up -d`
4. Open [http://localhost:3000](http://localhost:3000)
5. You can bring the containers down by `docker compose stop`
5. You can bring the containers down by `docker-compose stop`
### Docker Image
@@ -75,9 +75,9 @@ Flowise has 3 different modules in a single mono repository.
### Prerequisite
- Install [PNPM](https://pnpm.io/installation)
- Install [Yarn v1](https://classic.yarnpkg.com/en/docs/install)
```bash
npm i -g pnpm
npm i -g yarn
```
### Setup
@@ -97,31 +97,31 @@ Flowise has 3 different modules in a single mono repository.
3. Install all dependencies of all modules:
```bash
pnpm install
yarn install
```
4. Build all the code:
```bash
pnpm build
yarn build
```
5. Start the app:
```bash
pnpm start
yarn start
```
You can now access the app on [http://localhost:3000](http://localhost:3000)
6. For development build:
- Create `.env` file and specify the `VITE_PORT` (refer to `.env.example`) in `packages/ui`
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/ui`
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server`
- Run
```bash
pnpm dev
yarn dev
```
Any code changes will reload the app automatically on [http://localhost:8080](http://localhost:8080)

babel.config.js
@@ -0,0 +1,13 @@
module.exports = {
presets: [
'@babel/preset-typescript',
[
'@babel/preset-env',
{
targets: {
node: 'current'
}
}
]
]
}
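This mirrors the `babel` block removed from package.json (see the package.json diff below) and targets the running Node version, the usual arrangement for transpiling TypeScript on the fly under Jest. A minimal smoke test, assuming `@babel/cli`, `@babel/core`, and the two presets are installed (the input path is illustrative):

```bash
# Compile one file through the new config and inspect the output
npx babel --config-file ./babel.config.js --extensions .ts \
    packages/components/src/Interface.ts -o /tmp/interface.js
```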

@@ -5,16 +5,17 @@ SECRETKEY_PATH=/root/.flowise
LOG_PATH=/root/.flowise/logs
BLOB_STORAGE_PATH=/root/.flowise/storage
# CORS_ORIGINS="*"
# IFRAME_ORIGINS="*"
# NUMBER_OF_PROXIES= 1
# CORS_ORIGINS=*
# IFRAME_ORIGINS=*
# DATABASE_TYPE=postgres
# DATABASE_PORT=5432
# DATABASE_PORT=""
# DATABASE_HOST=""
# DATABASE_NAME=flowise
# DATABASE_USER=root
# DATABASE_PASSWORD=mypassword
# DATABASE_NAME="flowise"
# DATABASE_USER=""
# DATABASE_PASSWORD=""
# DATABASE_SSL=true
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>
@@ -22,11 +23,8 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# FLOWISE_PASSWORD=1234
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
# FLOWISE_FILE_SIZE_LIMIT=50mb
# DISABLE_CHATFLOW_REUSE=true
# DEBUG=true
# LOG_LEVEL=info (error | warn | info | verbose | debug)
# LOG_LEVEL=debug (error | warn | info | verbose | debug)
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
@@ -35,15 +33,4 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# LANGCHAIN_API_KEY=your_api_key
# LANGCHAIN_PROJECT=your_project
# DISABLE_FLOWISE_TELEMETRY=true
# Uncomment the following line to enable model list config, load the list of models from your local config file
# see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format
# MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path
# STORAGE_TYPE=local (local | s3)
# BLOB_STORAGE_PATH=/your_storage_path/.flowise/storage
# S3_STORAGE_BUCKET_NAME=flowise
# S3_STORAGE_ACCESS_KEY_ID=<your-access-key>
# S3_STORAGE_SECRET_ACCESS_KEY=<your-secret-key>
# S3_STORAGE_REGION=us-west-2
# DISABLE_FLOWISE_TELEMETRY=true
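Per the docker README in this same diff, the file is consumed by copying it next to the compose file:

```bash
cd docker
cp .env.example .env   # then uncomment and fill in values as needed
docker-compose up -d   # the 1.6.2 docs use docker-compose; main uses `docker compose`
```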

@@ -1,25 +1,21 @@
# Stage 1: Build stage
FROM node:20-alpine as build
FROM node:18-alpine
USER root
# Skip downloading Chrome for Puppeteer (saves build time)
RUN apk add --no-cache git
RUN apk add --no-cache python3 py3-pip make g++
# needed for pdfjs-dist
RUN apk add --no-cache build-base cairo-dev pango-dev
# Install Chromium
RUN apk add --no-cache chromium
ENV PUPPETEER_SKIP_DOWNLOAD=true
# Install latest Flowise globally (specific version can be set: flowise@1.0.0)
RUN npm install -g flowise
# Stage 2: Runtime stage
FROM node:20-alpine
# Install runtime dependencies
RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev
# Set the environment variable for Puppeteer to find Chromium
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
# Copy Flowise from the build stage
COPY --from=build /usr/local/lib/node_modules /usr/local/lib/node_modules
COPY --from=build /usr/local/bin /usr/local/bin
# You can install a specific version like: flowise@1.0.0
RUN npm install -g flowise
ENTRYPOINT ["flowise", "start"]
WORKDIR /data
CMD "flowise"
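Both variants of this image are built and run the same way, as documented in the READMEs elsewhere in this diff:

```bash
docker build --no-cache -t flowise .
docker run -d --name flowise -p 3000:3000 flowise
docker stop flowise
```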

@@ -5,9 +5,9 @@ Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise
## Usage
1. Create `.env` file and specify the `PORT` (refer to `.env.example`)
2. `docker compose up -d`
2. `docker-compose up -d`
3. Open [http://localhost:3000](http://localhost:3000)
4. You can bring the containers down by `docker compose stop`
4. You can bring the containers down by `docker-compose stop`
## 🔒 Authentication
@@ -19,9 +19,9 @@ Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
```
3. `docker compose up -d`
3. `docker-compose up -d`
4. Open [http://localhost:3000](http://localhost:3000)
5. You can bring the containers down by `docker compose stop`
5. You can bring the containers down by `docker-compose stop`
## 🌱 Env Variables

@@ -28,9 +28,8 @@ services:
- LOG_PATH=${LOG_PATH}
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
ports:
- '${PORT}:${PORT}'
volumes:
- ~/.flowise:/root/.flowise
entrypoint: /bin/sh -c "sleep 3; flowise start"
command: /bin/sh -c "sleep 3; flowise start"

@@ -1,166 +0,0 @@
<!-- markdownlint-disable MD030 -->
# 贡献给 Flowise
[English](../CONTRIBUTING.md) | 中文
我们欢迎任何形式的贡献。
## ⭐ 点赞
点赞并分享[Github 仓库](https://github.com/FlowiseAI/Flowise)。
## 🙋 问题和回答
在[问题和回答](https://github.com/FlowiseAI/Flowise/discussions/categories/q-a)部分搜索任何问题,如果找不到,可以毫不犹豫地创建一个。这可能会帮助到其他有类似问题的人。
## 🙌 分享 Chatflow
是的!分享你如何使用 Flowise 是一种贡献方式。将你的 Chatflow 导出为 JSON附上截图并在[展示和分享](https://github.com/FlowiseAI/Flowise/discussions/categories/show-and-tell)部分分享。
## 💡 想法
欢迎各种想法,如新功能、应用集成和区块链网络。在[想法](https://github.com/FlowiseAI/Flowise/discussions/categories/ideas)部分提交。
## 🐞 报告错误
发现问题了吗?[报告它](https://github.com/FlowiseAI/Flowise/issues/new/choose)。
## 👨‍💻 贡献代码
不确定要贡献什么?一些想法:
- 从 `packages/components` 创建新组件
- 更新现有组件,如扩展功能、修复错误
- 添加新的 Chatflow 想法
### 开发人员
Flowise 在一个单一的单体存储库中有 3 个不同的模块。
- `server`:用于提供 API 逻辑的 Node 后端
- `ui`React 前端
- `components`Langchain/LlamaIndex 组件
#### 先决条件
- 安装 [PNPM](https://pnpm.io/installation)
```bash
npm i -g pnpm
```
#### 逐步指南
1. Fork 官方的[Flowise Github 仓库](https://github.com/FlowiseAI/Flowise)。
2. 克隆你 fork 的存储库。
3. 创建一个新的分支,参考[指南](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository)。命名约定:
- 对于功能分支:`feature/<你的新功能>`
- 对于 bug 修复分支:`bugfix/<你的新bug修复>`。
4. 切换到新创建的分支。
5. 进入存储库文件夹
```bash
cd Flowise
```
6. 安装所有模块的依赖项:
```bash
pnpm install
```
7. 构建所有代码:
```bash
pnpm build
```
8. 在[http://localhost:3000](http://localhost:3000)上启动应用程序
```bash
pnpm start
```
9. 开发时:
- 在`packages/ui`中创建`.env`文件并指定`VITE_PORT`(参考`.env.example`
- 在`packages/server`中创建`.env`文件并指定`PORT`(参考`.env.example`
- 运行
```bash
pnpm dev
```
对`packages/ui`或`packages/server`进行的任何更改都将反映在[http://localhost:8080](http://localhost:8080)上
对于`packages/components`中进行的更改,再次运行`pnpm build`以应用更改。
10. 做完所有的更改后,运行以下命令来确保在生产环境中一切正常:
```bash
pnpm build
```
```bash
pnpm start
```
11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/master) 的分叉分支上提交 Pull Request。
## 🌱 环境变量
Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。阅读[更多信息](https://docs.flowiseai.com/environment-variables)
| 变量名 | 描述 | 类型 | 默认值 |
| ---------------------------- | -------------------------------------------------------------------- | ----------------------------------------------- | ----------------------------------- |
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
| FLOWISE_USERNAME | 登录用户名 | 字符串 | |
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb |
| DISABLE_CHATFLOW_REUSE | 强制为每次调用创建一个新的 ChatFlow而不是重用缓存中的现有 ChatFlow | 布尔值 | |
| DEBUG | 打印组件的日志 | 布尔值 | |
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
| APIKEY_PATH | 存储 API 密钥的位置 | 字符串 | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
| TOOL_FUNCTION_EXTERNAL_DEP | 用于工具函数的外部模块 | 字符串 | |
| DATABASE_TYPE | 存储 flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | 数据库保存的位置(当 DATABASE_TYPE 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
| DATABASE_HOST | 主机 URL 或 IP 地址(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_PORT | 数据库端口(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_USERNAME | 数据库用户名(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_PASSWORD | 数据库密码(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |
| MODEL_LIST_CONFIG_JSON | 加载模型的位置 | 字符 | `/your_model_list_config_file_path` |
| STORAGE_TYPE | 上传文件的存储类型 | 枚举字符串: `local`, `s3` | `local` |
| BLOB_STORAGE_PATH | 上传文件存储的本地文件夹路径, 当`STORAGE_TYPE`是`local` | 字符串 | `your-home-dir/.flowise/storage` |
| S3_STORAGE_BUCKET_NAME | S3 存储文件夹路径, 当`STORAGE_TYPE`是`s3` | 字符串 | |
| S3_STORAGE_ACCESS_KEY_ID | AWS 访问密钥 (Access Key) | 字符串 | |
| S3_STORAGE_SECRET_ACCESS_KEY | AWS 密钥 (Secret Key) | 字符串 | |
| S3_STORAGE_REGION | S3 存储地区 | 字符串 | |
您也可以在使用 `npx` 时指定环境变量。例如:
```
npx flowise start --PORT=3000 --DEBUG=true
```
## 📖 贡献文档
[Flowise 文档](https://github.com/FlowiseAI/FlowiseDocs)
## 🏷️ Pull Request 流程
当您打开一个 Pull Request 时FlowiseAI 团队的成员将自动收到通知/指派。您也可以在 [Discord](https://discord.gg/jbaHfsRVBW) 上联系我们。
##

@@ -1,204 +0,0 @@
<!-- markdownlint-disable MD030 -->
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
# Flowise - LLM アプリを簡単に構築
[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases)
[![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW)
[![Twitter Follow](https://img.shields.io/twitter/follow/FlowiseAI?style=social)](https://twitter.com/FlowiseAI)
[![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
[English](../README.md) | [中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md)
<h3>ドラッグ&ドロップでカスタマイズした LLM フローを構築できる UI</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
## ⚡ クイックスタート
[NodeJS](https://nodejs.org/en/download) >= 18.15.0 をダウンロードしてインストール
1. Flowise のインストール
```bash
npm install -g flowise
```
2. Flowise の実行
```bash
npx flowise start
```
ユーザー名とパスワードを入力
```bash
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
```
3. [http://localhost:3000](http://localhost:3000) を開く
## 🐳 Docker
### Docker Compose
1. プロジェクトのルートにある `docker` フォルダに移動する
2. `.env.example` ファイルをコピーして同じ場所に貼り付け、名前を `.env` に変更する
3. `docker compose up -d`
4. [http://localhost:3000](http://localhost:3000) を開く
5. コンテナを停止するには、`docker compose stop` を使用します
### Docker Image
1. ローカルにイメージを構築する:
```bash
docker build --no-cache -t flowise .
```
2. image を実行:
```bash
docker run -d --name flowise -p 3000:3000 flowise
```
3. image を停止:
```bash
docker stop flowise
```
## 👨‍💻 開発者向け
Flowise には、3 つの異なるモジュールが 1 つの mono リポジトリにあります。
- `server`: API ロジックを提供する Node バックエンド
- `ui`: React フロントエンド
- `components`: サードパーティノードとの統合
### 必須条件
- [PNPM](https://pnpm.io/installation) をインストール
```bash
npm i -g pnpm
```
### セットアップ
1. リポジトリをクローン
```bash
git clone https://github.com/FlowiseAI/Flowise.git
```
2. リポジトリフォルダに移動
```bash
cd Flowise
```
3. すべてのモジュールの依存関係をインストール:
```bash
pnpm install
```
4. すべてのコードをビルド:
```bash
pnpm build
```
5. アプリを起動:
```bash
pnpm start
```
[http://localhost:3000](http://localhost:3000) でアプリにアクセスできるようになりました
6. 開発用ビルド:
- `.env` ファイルを作成し、`packages/ui` に `VITE_PORT` を指定する(`.env.example` を参照)
- `.env` ファイルを作成し、`packages/server` に `PORT` を指定する(`.env.example` を参照)
- 実行
```bash
pnpm dev
```
コードの変更は [http://localhost:8080](http://localhost:8080) に自動的にアプリをリロードします
## 🔒 認証
アプリレベルの認証を有効にするには、 `FLOWISE_USERNAME``FLOWISE_PASSWORD``packages/server``.env` ファイルに追加します:
```
FLOWISE_USERNAME=user
FLOWISE_PASSWORD=1234
```
## 🌱 環境変数
Flowise は、インスタンスを設定するためのさまざまな環境変数をサポートしています。`packages/server` フォルダ内の `.env` ファイルで以下の変数を指定することができる。[続き](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)を読む
## 📖 ドキュメント
[Flowise ドキュメント](https://docs.flowiseai.com/)
## 🌐 セルフホスト
お客様の既存インフラに Flowise をセルフホストでデプロイ、様々な[デプロイ](https://docs.flowiseai.com/configuration/deployment)をサポートします
- [AWS](https://docs.flowiseai.com/deployment/aws)
- [Azure](https://docs.flowiseai.com/deployment/azure)
- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
- [GCP](https://docs.flowiseai.com/deployment/gcp)
- <details>
<summary>その他</summary>
- [Railway](https://docs.flowiseai.com/deployment/railway)
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
- [Render](https://docs.flowiseai.com/deployment/render)
[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
- [Hugging Face Spaces](https://docs.flowiseai.com/deployment/hugging-face)
<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="Hugging Face Spaces"></a>
- [Elestio](https://elest.io/open-source/flowiseai)
[![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
[![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
- [RepoCloud](https://repocloud.io/details/?app_id=29)
[![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29)
</details>
## 💻 クラウドホスト
近日公開
## 🙋 サポート
ご質問、問題提起、新機能のご要望は、[discussion](https://github.com/FlowiseAI/Flowise/discussions)までお気軽にどうぞ
## 🙌 コントリビュート
これらの素晴らしい貢献者に感謝します
<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
</a>
[コントリビューティングガイド](CONTRIBUTING.md)を参照してください。質問や問題があれば、[Discord](https://discord.gg/jbaHfsRVBW) までご連絡ください。
[![Star History Chart](https://api.star-history.com/svg?repos=FlowiseAI/Flowise&type=Timeline)](https://star-history.com/#FlowiseAI/Flowise&Date)
## 📄 ライセンス
このリポジトリのソースコードは、[Apache License Version 2.0](LICENSE.md)の下で利用可能です。

@@ -1,204 +0,0 @@
<!-- markdownlint-disable MD030 -->
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
# Flowise - 간편한 LLM 애플리케이션 제작
[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases)
[![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW)
[![Twitter Follow](https://img.shields.io/twitter/follow/FlowiseAI?style=social)](https://twitter.com/FlowiseAI)
[![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
[English](../README.md) | [中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어
<h3>드래그 앤 드롭 UI로 맞춤형 LLM 플로우 구축하기</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
## ⚡빠른 시작 가이드
18.15.0 버전 이상의 [NodeJS](https://nodejs.org/en/download) 다운로드 및 설치
1. Flowise 설치
```bash
npm install -g flowise
```
2. Flowise 시작하기
```bash
npx flowise start
```
사용자 이름과 비밀번호로 시작하기
```bash
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
```
3. [http://localhost:3000](http://localhost:3000) URL 열기
## 🐳 도커(Docker)를 활용하여 시작하기
### 도커 컴포즈 활용
1. 프로젝트의 최상위(root) 디렉토리에 있는 `docker` 폴더로 이동하세요.
2. `.env.example` 파일을 복사한 후, 같은 경로에 붙여넣기 한 다음, `.env`로 이름을 변경합니다.
3. `docker compose up -d` 실행
4. [http://localhost:3000](http://localhost:3000) URL 열기
5. `docker compose stop` 명령어를 통해 컨테이너를 종료시킬 수 있습니다.
### 도커 이미지 활용
1. 로컬에서 이미지 빌드하기:
```bash
docker build --no-cache -t flowise .
```
2. 이미지 실행하기:
```bash
docker run -d --name flowise -p 3000:3000 flowise
```
3. 이미지 종료하기:
```bash
docker stop flowise
```
## 👨‍💻 개발자들을 위한 가이드
Flowise는 단일 리포지토리에 3개의 서로 다른 모듈이 있습니다.
- `server`: API 로직을 제공하는 노드 백엔드
- `ui`: 리액트 프론트엔드
- `components`: 서드파티 노드 통합을 위한 컴포넌트
### 사전 설치 요건
- [PNPM](https://pnpm.io/installation) 설치하기
```bash
npm i -g pnpm
```
### 설치 및 설정
1. 리포지토리 복제
```bash
git clone https://github.com/FlowiseAI/Flowise.git
```
2. 리포지토리 폴더로 이동
```bash
cd Flowise
```
3. 모든 모듈의 종속성 설치:
```bash
pnpm install
```
4. 모든 코드 빌드하기:
```bash
pnpm build
```
5. 애플리케이션 시작:
```bash
pnpm start
```
이제 [http://localhost:3000](http://localhost:3000)에서 애플리케이션에 접속할 수 있습니다.
6. 개발 환경에서 빌드할 경우:
- `packages/ui`경로에 `.env` 파일을 생성하고 `VITE_PORT`(`.env.example` 참조)를 지정합니다.
- `packages/server`경로에 `.env` 파일을 생성하고 `PORT`(`.env.example` 참조)를 지정합니다.
- 실행하기
```bash
pnpm dev
```
코드가 변경되면 [http://localhost:8080](http://localhost:8080)에서 자동으로 애플리케이션을 새로고침 합니다.
## 🔒 인증
애플리케이션 수준의 인증을 사용하려면 `packages/server``.env` 파일에 `FLOWISE_USERNAME``FLOWISE_PASSWORD`를 추가합니다:
```
FLOWISE_USERNAME=user
FLOWISE_PASSWORD=1234
```
## 🌱 환경 변수
Flowise는 인스턴스 구성을 위한 다양한 환경 변수를 지원합니다. `packages/server` 폴더 내 `.env` 파일에 다양한 환경 변수를 지정할 수 있습니다. [자세히 보기](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
## 📖 공식 문서
[Flowise 문서](https://docs.flowiseai.com/)
## 🌐 자체 호스팅 하기
기존 인프라 환경에서 Flowise를 자체 호스팅으로 배포하세요. 다양한 배포 [deployments](https://docs.flowiseai.com/configuration/deployment) 방법을 지원합니다.
- [AWS](https://docs.flowiseai.com/deployment/aws)
- [Azure](https://docs.flowiseai.com/deployment/azure)
- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
- [GCP](https://docs.flowiseai.com/deployment/gcp)
- <details>
<summary>그 외</summary>
- [Railway](https://docs.flowiseai.com/deployment/railway)
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
- [Render](https://docs.flowiseai.com/deployment/render)
[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
- [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
- [Elestio](https://elest.io/open-source/flowiseai)
[![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
[![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
- [RepoCloud](https://repocloud.io/details/?app_id=29)
[![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29)
</details>
## 💻 클라우드 호스팅 서비스
곧 출시될 예정입니다.
## 🙋 기술 지원
질문, 버그 리포팅, 새로운 기능 요청 등은 [discussion](https://github.com/FlowiseAI/Flowise/discussions) 섹션에서 자유롭게 이야기 해주세요.
## 🙌 오픈소스 활동에 기여하기
다음과 같은 멋진 기여자들(contributors)에게 감사드립니다.
<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
</a>
[contributing guide](CONTRIBUTING.md)를 살펴보세요. 디스코드 [Discord](https://discord.gg/jbaHfsRVBW) 채널에서도 이슈나 질의응답을 진행하실 수 있습니다.
[![Star History Chart](https://api.star-history.com/svg?repos=FlowiseAI/Flowise&type=Timeline)](https://star-history.com/#FlowiseAI/Flowise&Date)
## 📄 라이센스
본 리포지토리의 소스코드는 [Apache License Version 2.0](LICENSE.md) 라이센스가 적용됩니다.

@@ -1,6 +1,6 @@
{
"name": "flowise",
"version": "1.8.4",
"version": "1.5.1",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [
@@ -11,19 +11,19 @@
],
"scripts": {
"build": "turbo run build",
"build-force": "pnpm clean && turbo run build --force",
"build-force": "turbo run build --force",
"dev": "turbo run dev --parallel",
"start": "run-script-os",
"start:windows": "cd packages/server/bin && run start",
"start:default": "cd packages/server/bin && ./run start",
"clean": "pnpm --filter \"./packages/**\" clean",
"nuke": "pnpm --filter \"./packages/**\" nuke && rimraf node_modules .turbo",
"clean": "npm exec -ws -- rimraf dist build",
"format": "prettier --write \"**/*.{ts,tsx,md}\"",
"test": "turbo run test",
"lint": "eslint \"**/*.{js,jsx,ts,tsx,json,md}\"",
"lint-fix": "pnpm lint --fix",
"lint-fix": "yarn lint --fix",
"quick": "pretty-quick --staged",
"postinstall": "husky install",
"migration:create": "pnpm typeorm migration:create"
"migration:create": "yarn typeorm migration:create"
},
"lint-staged": {
"*.{js,jsx,ts,tsx,json,md}": "eslint --fix"
@@ -43,56 +43,18 @@
"eslint-plugin-react-hooks": "^4.6.0",
"eslint-plugin-unused-imports": "^2.0.0",
"husky": "^8.0.1",
"kill-port": "^2.0.1",
"lint-staged": "^13.0.3",
"prettier": "^2.7.1",
"pretty-quick": "^3.1.3",
"rimraf": "^3.0.2",
"run-script-os": "^1.1.6",
"turbo": "1.10.16",
"turbo": "^1.7.4",
"typescript": "^4.8.4"
},
"pnpm": {
"onlyBuiltDependencies": [
"faiss-node",
"sqlite3"
]
},
"engines": {
"node": ">=18.15.0 <19.0.0 || ^20",
"pnpm": ">=9"
"node": ">=18.15.0"
},
"resolutions": {
"@qdrant/openapi-typescript-fetch": "1.2.1",
"@google/generative-ai": "^0.7.0",
"openai": "4.51.0"
},
"eslintIgnore": [
"**/dist",
"**/node_modules",
"**/build",
"**/package-lock.json"
],
"prettier": {
"printWidth": 140,
"singleQuote": true,
"jsxSingleQuote": true,
"trailingComma": "none",
"tabWidth": 4,
"semi": false,
"endOfLine": "auto"
},
"babel": {
"presets": [
"@babel/preset-typescript",
[
"@babel/preset-env",
{
"targets": {
"node": "current"
}
}
]
]
"@qdrant/openapi-typescript-fetch": "1.2.1"
}
}
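The script changes track the package-manager switch. As a sketch, the main-side (pnpm) equivalents of the common 1.6.2 (yarn) commands are:

```bash
pnpm install                          # yarn install
pnpm build                            # yarn build
pnpm --filter "./packages/**" clean   # yarn clean (npm exec -ws -- rimraf dist build)
```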

@@ -10,8 +10,13 @@ class AstraDBApi implements INodeCredential {
constructor() {
this.label = 'Astra DB API'
this.name = 'AstraDBApi'
this.version = 2.0
this.version = 1.0
this.inputs = [
{
label: 'Astra DB Collection Name',
name: 'collectionName',
type: 'string'
},
{
label: 'Astra DB Application Token',
name: 'applicationToken',

@@ -1,28 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class BaiduApi implements INodeCredential {
label: string
name: string
version: number
inputs: INodeParams[]
constructor() {
this.label = 'Baidu API'
this.name = 'baiduApi'
this.version = 1.0
this.inputs = [
{
label: 'Baidu Api Key',
name: 'baiduApiKey',
type: 'password'
},
{
label: 'Baidu Secret Key',
name: 'baiduSecretKey',
type: 'password'
}
]
}
}
module.exports = { credClass: BaiduApi }

@@ -1,23 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class ChatflowApi implements INodeCredential {
label: string
name: string
version: number
inputs: INodeParams[]
constructor() {
this.label = 'Chatflow API'
this.name = 'chatflowApi'
this.version = 1.0
this.inputs = [
{
label: 'Chatflow Api Key',
name: 'chatflowApiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: ChatflowApi }

@@ -1,6 +1,6 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class ConfluenceCloudApi implements INodeCredential {
class ConfluenceApi implements INodeCredential {
label: string
name: string
version: number
@@ -8,8 +8,8 @@ class ConfluenceCloudApi implements INodeCredential {
inputs: INodeParams[]
constructor() {
this.label = 'Confluence Cloud API'
this.name = 'confluenceCloudApi'
this.label = 'Confluence API'
this.name = 'confluenceApi'
this.version = 1.0
this.description =
'Refer to <a target="_blank" href="https://support.atlassian.com/confluence-cloud/docs/manage-oauth-access-tokens/">official guide</a> on how to get Access Token or <a target="_blank" href="https://id.atlassian.com/manage-profile/security/api-tokens">API Token</a> on Confluence'
@@ -30,4 +30,4 @@
}
}
module.exports = { credClass: ConfluenceCloudApi }
module.exports = { credClass: ConfluenceApi }

@@ -1,27 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class ConfluenceServerDCApi implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'Confluence Server/Data Center API'
this.name = 'confluenceServerDCApi'
this.version = 1.0
this.description =
'Refer to <a target="_blank" href="https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html/">official guide</a> on how to get Personal Access Token</a> on Confluence'
this.inputs = [
{
label: 'Personal Access Token',
name: 'personalAccessToken',
type: 'password',
placeholder: '<CONFLUENCE_PERSONAL_ACCESS_TOKEN>'
}
]
}
}
module.exports = { credClass: ConfluenceServerDCApi }

@@ -1,39 +0,0 @@
/*
* Temporary disabled due to the incompatibility with the docker node-alpine:
* https://github.com/FlowiseAI/Flowise/pull/2303
import { INodeParams, INodeCredential } from '../src/Interface'
class CouchbaseApi implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'Couchbase API'
this.name = 'couchbaseApi'
this.version = 1.0
this.inputs = [
{
label: 'Couchbase Connection String',
name: 'connectionString',
type: 'string'
},
{
label: 'Couchbase Username',
name: 'username',
type: 'string'
},
{
label: 'Couchbase Password',
name: 'password',
type: 'password'
}
]
}
}
module.exports = { credClass: CouchbaseApi }
*/

@@ -1,26 +0,0 @@
/*
* TODO: Implement codeInterpreter column to chat_message table
import { INodeParams, INodeCredential } from '../src/Interface'
class E2BApi implements INodeCredential {
label: string
name: string
version: number
inputs: INodeParams[]
constructor() {
this.label = 'E2B API'
this.name = 'E2BApi'
this.version = 1.0
this.inputs = [
{
label: 'E2B Api Key',
name: 'e2bApiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: E2BApi }
*/

@@ -1,26 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class ExaSearchApi implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'Exa Search API'
this.name = 'exaSearchApi'
this.version = 1.0
this.description =
'Refer to <a target="_blank" href="https://docs.exa.ai/reference/getting-started#getting-access">official guide</a> on how to get an API Key from Exa'
this.inputs = [
{
label: 'ExaSearch Api Key',
name: 'exaSearchApiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: ExaSearchApi }

@@ -1,26 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class FireCrawlApiCredential implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'FireCrawl API'
this.name = 'fireCrawlApi'
this.version = 1.0
this.description =
'You can find the FireCrawl API token on your <a target="_blank" href="https://www.firecrawl.dev/">FireCrawl account</a> page.'
this.inputs = [
{
label: 'FireCrawl API',
name: 'firecrawlApiToken',
type: 'password'
}
]
}
}
module.exports = { credClass: FireCrawlApiCredential }

@@ -1,23 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class FireworksApi implements INodeCredential {
label: string
name: string
version: number
inputs: INodeParams[]
constructor() {
this.label = 'Fireworks API'
this.name = 'fireworksApi'
this.version = 1.0
this.inputs = [
{
label: 'Fireworks Api Key',
name: 'fireworksApiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: FireworksApi }

@@ -1,23 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class GroqApi implements INodeCredential {
label: string
name: string
version: number
inputs: INodeParams[]
constructor() {
this.label = 'Groq API'
this.name = 'groqApi'
this.version = 1.0
this.inputs = [
{
label: 'Groq Api Key',
name: 'groqApiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: GroqApi }

@@ -1,33 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class LangWatchApi implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'LangWatch API'
this.name = 'langwatchApi'
this.version = 1.0
this.description =
'Refer to <a target="_blank" href="https://docs.langwatch.ai/integration/python/guide">integration guide</a> on how to get API keys on LangWatch'
this.inputs = [
{
label: 'API Key',
name: 'langWatchApiKey',
type: 'password',
placeholder: '<LANGWATCH_API_KEY>'
},
{
label: 'Endpoint',
name: 'langWatchEndpoint',
type: 'string',
default: 'https://app.langwatch.ai'
}
]
}
}
module.exports = { credClass: LangWatchApi }

@@ -0,0 +1,31 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class MotorheadMemoryApi implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'Motorhead Memory API'
this.name = 'motorheadMemoryApi'
this.version = 1.0
this.description =
'Refer to <a target="_blank" href="https://docs.getmetal.io/misc-get-keys">official guide</a> on how to create API key and Client ID on Motorhead Memory'
this.inputs = [
{
label: 'Client ID',
name: 'clientId',
type: 'string'
},
{
label: 'API Key',
name: 'apiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: MotorheadMemoryApi }

@@ -1,31 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class MySQLApi implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'MySQL API'
this.name = 'MySQLApi'
this.version = 1.0
this.inputs = [
{
label: 'User',
name: 'user',
type: 'string',
placeholder: '<MYSQL_USERNAME>'
},
{
label: 'Password',
name: 'password',
type: 'password',
placeholder: '<MYSQL_PASSWORD>'
}
]
}
}
module.exports = { credClass: MySQLApi }

@@ -1,38 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class OpenSearchUrl implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'OpenSearch'
this.name = 'openSearchUrl'
this.version = 2.0
this.inputs = [
{
label: 'OpenSearch Url',
name: 'openSearchUrl',
type: 'string'
},
{
label: 'User',
name: 'user',
type: 'string',
placeholder: '<OPENSEARCH_USERNAME>',
optional: true
},
{
label: 'Password',
name: 'password',
type: 'password',
placeholder: '<OPENSEARCH_PASSWORD>',
optional: true
}
]
}
}
module.exports = { credClass: OpenSearchUrl }

@@ -1,25 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class SpiderApiCredential implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'Spider API'
this.name = 'spiderApi'
this.version = 1.0
this.description = 'Get your API key from the <a target="_blank" href="https://spider.cloud">Spider</a> dashboard.'
this.inputs = [
{
label: 'Spider API Key',
name: 'spiderApiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: SpiderApiCredential }

@@ -1,23 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class TogetherAIApi implements INodeCredential {
label: string
name: string
version: number
inputs: INodeParams[]
constructor() {
this.label = 'TogetherAI API'
this.name = 'togetherAIApi'
this.version = 1.0
this.inputs = [
{
label: 'TogetherAI Api Key',
name: 'togetherAIApiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: TogetherAIApi }

@@ -1,29 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class UpstashVectorApi implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'Upstash Vector API'
this.name = 'upstashVectorApi'
this.version = 1.0
this.inputs = [
{
label: 'Upstash Vector REST URL',
name: 'UPSTASH_VECTOR_REST_URL',
type: 'string'
},
{
label: 'Upstash Vector REST Token',
name: 'UPSTASH_VECTOR_REST_TOKEN',
type: 'password'
}
]
}
}
module.exports = { credClass: UpstashVectorApi }

@@ -1,32 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'
class VoyageAIApi implements INodeCredential {
label: string
name: string
version: number
description: string
inputs: INodeParams[]
constructor() {
this.label = 'Voyage AI API'
this.name = 'voyageAIApi'
this.version = 1.0
this.description =
'Refer to <a target="_blank" href="https://docs.voyageai.com/install/#authentication-with-api-keys">official guide</a> on how to get an API Key'
this.inputs = [
{
label: 'Voyage AI Endpoint',
name: 'endpoint',
type: 'string',
default: 'https://api.voyageai.com/v1/embeddings'
},
{
label: 'Voyage AI API Key',
name: 'apiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: VoyageAIApi }

@@ -1,4 +1,6 @@
const { src, dest } = require('gulp')
import gulp from 'gulp'
const { src, dest } = gulp
function copyIcons() {
return src(['nodes/**/*.{jpg,png,svg}']).pipe(dest('dist/nodes'))
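The ESM rewrite keeps the same `copyIcons` task, which copies node icons into `dist/nodes`. Assuming the task is exported at the bottom of the file (the export falls outside this hunk), it would run as:

```bash
# Assumes `exports.copyIcons = copyIcons` (hypothetical; not shown in the hunk)
npx gulp copyIcons
```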

File diff suppressed because it is too large.

@@ -6,8 +6,6 @@ import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '..
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class Airtable_Agents implements INode {
label: string
@@ -24,7 +22,7 @@ class Airtable_Agents implements INode {
constructor() {
this.label = 'Airtable Agent'
this.name = 'airtableAgent'
this.version = 2.0
this.version = 1.0
this.type = 'AgentExecutor'
this.category = 'Agents'
this.icon = 'airtable.svg'
@@ -73,14 +71,6 @@ class Airtable_Agents implements INode {
default: 100,
additionalParams: true,
description: 'Number of results to return'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@@ -90,24 +80,12 @@
return undefined
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const model = nodeData.inputs?.model as BaseLanguageModel
const baseId = nodeData.inputs?.baseId as string
const tableId = nodeData.inputs?.tableId as string
const returnAll = nodeData.inputs?.returnAll as boolean
const limit = nodeData.inputs?.limit as string
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Airtable agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const accessToken = getCredentialParam('accessToken', credentialData, nodeData)
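The moderation guard removed in this hunk recurs across the agent nodes below; a condensed sketch of the shared pattern, assuming the checkInputs/formatResponse helpers imported at the top of this file:

import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

// Run every configured moderation chain over the user input; on rejection,
// pause briefly (so streaming clients settle) and return the formatted error.
const applyInputModeration = async (moderations: Moderation[] | undefined, input: string): Promise<string | object> => {
    if (!moderations?.length) return input
    try {
        return await checkInputs(moderations, input)
    } catch (e: any) {
        await new Promise((resolve) => setTimeout(resolve, 500))
        return formatResponse(e.message)
    }
}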

View File

@ -7,8 +7,6 @@ import { PromptTemplate } from '@langchain/core/prompts'
import { AutoGPT } from 'langchain/experimental/autogpt'
import { LLMChain } from 'langchain/chains'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
type ObjectTool = StructuredTool
const FINISH_NAME = 'finish'
@ -27,7 +25,7 @@ class AutoGPT_Agents implements INode {
constructor() {
this.label = 'AutoGPT'
this.name = 'autoGPT'
this.version = 2.0
this.version = 1.0
this.type = 'AutoGPT'
this.category = 'Agents'
this.icon = 'autogpt.svg'
@ -70,14 +68,6 @@ class AutoGPT_Agents implements INode {
type: 'number',
default: 5,
optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@ -102,21 +92,9 @@ class AutoGPT_Agents implements INode {
return autogpt
}
async run(nodeData: INodeData, input: string): Promise<string | object> {
async run(nodeData: INodeData, input: string): Promise<string> {
const executor = nodeData.instance as AutoGPT
const model = nodeData.inputs?.model as BaseChatModel
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the AutoGPT agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
try {
let totalAssistantReply = ''

View File

@ -2,8 +2,6 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { VectorStore } from '@langchain/core/vectorstores'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { BabyAGI } from './core'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class BabyAGI_Agents implements INode {
label: string
@ -19,7 +17,7 @@ class BabyAGI_Agents implements INode {
constructor() {
this.label = 'BabyAGI'
this.name = 'babyAGI'
this.version = 2.0
this.version = 1.0
this.type = 'BabyAGI'
this.category = 'Agents'
this.icon = 'babyagi.svg'
@ -41,14 +39,6 @@ class BabyAGI_Agents implements INode {
name: 'taskLoop',
type: 'number',
default: 3
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@ -63,21 +53,8 @@ class BabyAGI_Agents implements INode {
return babyAgi
}
async run(nodeData: INodeData, input: string): Promise<string | object> {
async run(nodeData: INodeData, input: string): Promise<string> {
const executor = nodeData.instance as BabyAGI
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the BabyAGI agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const objective = input
const res = await executor.call({ objective })

View File

@ -5,9 +5,6 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from
import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { getFileFromStorage } from '../../../src'
class CSV_Agents implements INode {
label: string
@ -23,7 +20,7 @@ class CSV_Agents implements INode {
constructor() {
this.label = 'CSV Agent'
this.name = 'csvAgent'
this.version = 3.0
this.version = 1.0
this.type = 'AgentExecutor'
this.category = 'Agents'
this.icon = 'CSVagent.svg'
@ -50,24 +47,6 @@ class CSV_Agents implements INode {
optional: true,
placeholder:
'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
},
{
label: 'Custom Pandas Read_CSV Code',
description:
'Custom Pandas <a target="_blank" href="https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html">read_csv</a> function. Takes in an input: "csv_data"',
name: 'customReadCSV',
default: 'read_csv(csv_data)',
type: 'code',
optional: true,
additionalParams: true
}
]
}
@ -77,56 +56,29 @@ class CSV_Agents implements INode {
return undefined
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const csvFileBase64 = nodeData.inputs?.csvFile as string
const model = nodeData.inputs?.model as BaseLanguageModel
const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
const moderations = nodeData.inputs?.inputModeration as Moderation[]
const _customReadCSV = nodeData.inputs?.customReadCSV as string
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the CSV agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
const callbacks = await additionalCallbacks(nodeData, options)
let files: string[] = []
if (csvFileBase64.startsWith('[') && csvFileBase64.endsWith(']')) {
files = JSON.parse(csvFileBase64)
} else {
files = [csvFileBase64]
}
let base64String = ''
if (csvFileBase64.startsWith('FILE-STORAGE::')) {
const fileName = csvFileBase64.replace('FILE-STORAGE::', '')
if (fileName.startsWith('[') && fileName.endsWith(']')) {
files = JSON.parse(fileName)
} else {
files = [fileName]
}
const chatflowid = options.chatflowid
for (const file of files) {
const fileData = await getFileFromStorage(file, chatflowid)
base64String += fileData.toString('base64')
}
} else {
if (csvFileBase64.startsWith('[') && csvFileBase64.endsWith(']')) {
files = JSON.parse(csvFileBase64)
} else {
files = [csvFileBase64]
}
for (const file of files) {
const splitDataURI = file.split(',')
splitDataURI.pop()
base64String += splitDataURI.pop() ?? ''
}
for (const file of files) {
const splitDataURI = file.split(',')
splitDataURI.pop()
base64String = splitDataURI.pop() ?? ''
}
const pyodide = await LoadPyodide()
@ -134,7 +86,6 @@ class CSV_Agents implements INode {
// First load the csv file and get the dataframe dictionary of column types
// For example using titanic.csv: {'PassengerId': 'int64', 'Survived': 'int64', 'Pclass': 'int64', 'Name': 'object', 'Sex': 'object', 'Age': 'float64', 'SibSp': 'int64', 'Parch': 'int64', 'Ticket': 'object', 'Fare': 'float64', 'Cabin': 'object', 'Embarked': 'object'}
let dataframeColDict = ''
let customReadCSVFunc = _customReadCSV ? _customReadCSV : 'read_csv(csv_data)'
try {
const code = `import pandas as pd
import base64
@ -147,7 +98,7 @@ decoded_data = base64.b64decode(base64_string)
csv_data = StringIO(decoded_data.decode('utf-8'))
df = pd.${customReadCSVFunc}
df = pd.read_csv(csv_data)
my_dict = df.dtypes.astype(str).to_dict()
print(my_dict)
json.dumps(my_dict)`
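The removed Custom Pandas Read_CSV Code input lets a flow override how Pyodide parses the file: the string is interpolated into df = pd.<expression> above. For example, a semicolon-delimited export could be handled with a node input like the following (the exact option set is illustrative):

// Value supplied to the 'customReadCSV' node input; it must reference `csv_data`
const customReadCSV = 'read_csv(csv_data, sep=";", encoding="utf-8")'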

View File

@ -4,16 +4,15 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { renderTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
import { renderTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatConversationalAgent } from 'langchain/agents'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { IVisionChatModal, FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { AgentExecutor } from '../../../src/agents'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils'
const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
@ -47,7 +46,7 @@ class ConversationalAgent_Agents implements INode {
constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Agent'
this.name = 'conversationalAgent'
this.version = 3.0
this.version = 2.0
this.type = 'AgentExecutor'
this.category = 'Agents'
this.icon = 'agent.svg'
@ -78,72 +77,35 @@ class ConversationalAgent_Agents implements INode {
default: DEFAULT_PREFIX,
optional: true,
additionalParams: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
},
{
label: 'Max Iterations',
name: 'maxIterations',
type: 'number',
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
}
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Conversational Agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
const executor = await prepareAgent(
nodeData,
options,
{ sessionId: this.sessionId, chatId: options.chatId, input },
options.chatHistory
)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
let res: ChainValues = {}
let sourceDocuments: ICommonObject[] = []
let usedTools: IUsedTool[] = []
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
if (res.sourceDocuments) {
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
sourceDocuments = res.sourceDocuments
}
if (res.usedTools) {
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
usedTools = res.usedTools
}
} else {
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
if (res.sourceDocuments) {
sourceDocuments = res.sourceDocuments
}
if (res.usedTools) {
usedTools = res.usedTools
}
}
await memory.addChatMessages(
@ -160,37 +122,23 @@ class ConversationalAgent_Agents implements INode {
this.sessionId
)
let finalRes = res?.output
if (sourceDocuments.length || usedTools.length) {
finalRes = { text: res?.output }
if (sourceDocuments.length) {
finalRes.sourceDocuments = flatten(sourceDocuments)
}
if (usedTools.length) {
finalRes.usedTools = usedTools
}
return finalRes
}
return finalRes
return res?.output
}
}
const prepareAgent = async (
nodeData: INodeData,
options: ICommonObject,
flowObj: { sessionId?: string; chatId?: string; input?: string }
flowObj: { sessionId?: string; chatId?: string; input?: string },
chatHistory: IMessage[] = []
) => {
const model = nodeData.inputs?.model as BaseChatModel
const maxIterations = nodeData.inputs?.maxIterations as string
let tools = nodeData.inputs?.tools as Tool[]
tools = flatten(tools)
const memory = nodeData.inputs?.memory as FlowiseMemory
const systemMessage = nodeData.inputs?.systemMessage as string
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
const inputKey = memory.inputKey ? memory.inputKey : 'input'
const prependMessages = options?.prependMessages
const outputParser = ChatConversationalAgent.getDefaultOutputParser({
llm: model,
@ -202,32 +150,33 @@ const prepareAgent = async (
outputParser
})
if (llmSupportsVision(model)) {
const visionChatModel = model as IVisionChatModal
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
if (model instanceof ChatOpenAI) {
let humanImageMessages: HumanMessage[] = []
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
visionChatModel.setVisionModel()
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
for (const msg of messageContent) {
humanImageMessages.push(new HumanMessage({ content: [msg] }))
}
// Pop the `agent_scratchpad` MessagePlaceHolder
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
const template = (lastMessage.prompt as PromptTemplate).template as string
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: template
}
])
msg.inputVariables = lastMessage.inputVariables
prompt.promptMessages.push(msg)
}
// Add the HumanMessage for images
prompt.promptMessages.push(...humanImageMessages)
// Add the `agent_scratchpad` MessagePlaceHolder back
prompt.promptMessages.push(messagePlaceholder)
} else {
visionChatModel.revertToOriginalModel()
// revert to previous values if image upload is empty
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}
@ -241,7 +190,7 @@ const prepareAgent = async (
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
agent_scratchpad: async (i: { input: string; steps: AgentStep[] }) => await constructScratchPad(i.steps),
[memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
return messages ?? []
}
},
@ -256,8 +205,7 @@ const prepareAgent = async (
sessionId: flowObj?.sessionId,
chatId: flowObj?.chatId,
input: flowObj?.input,
verbose: process.env.DEBUG === 'true',
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
verbose: process.env.DEBUG === 'true'
})
return executor
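The vision handling above toggles the model rather than rebuilding it; a condensed sketch of that toggle, using the IVisionChatModal methods referenced in this hunk:

import { IVisionChatModal } from '../../../src/Interface'

// Switch to the vision-capable variant only when image content is attached,
// otherwise restore the originally configured model and token limit.
const toggleVisionModel = (model: IVisionChatModal, messageContent?: unknown[]) => {
    if (messageContent?.length) model.setVisionModel()
    else model.revertToOriginalModel()
}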

View File

@ -0,0 +1,156 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI, formatToOpenAIFunction } from '@langchain/openai'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`
class ConversationalRetrievalAgent_Agents implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
inputs: INodeParams[]
sessionId?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Retrieval Agent'
this.name = 'conversationalRetrievalAgent'
this.version = 3.0
this.type = 'AgentExecutor'
this.category = 'Agents'
this.icon = 'agent.svg'
this.description = `An agent optimized for retrieval during conversation, answering questions based on past dialogue, all using OpenAI's Function Calling`
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
this.inputs = [
{
label: 'Allowed Tools',
name: 'tools',
type: 'Tool',
list: true
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'OpenAI/Azure Chat Model',
name: 'model',
type: 'BaseChatModel'
},
{
label: 'System Message',
name: 'systemMessage',
type: 'string',
default: defaultMessage,
rows: 4,
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
}
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
let res: ChainValues = {}
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
} else {
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
}
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: res?.output,
type: 'apiMessage'
}
],
this.sessionId
)
return res?.output
}
}
const prepareAgent = (
nodeData: INodeData,
flowObj: { sessionId?: string; chatId?: string; input?: string },
chatHistory: IMessage[] = []
) => {
const model = nodeData.inputs?.model as ChatOpenAI
const memory = nodeData.inputs?.memory as FlowiseMemory
const systemMessage = nodeData.inputs?.systemMessage as string
let tools = nodeData.inputs?.tools
tools = flatten(tools)
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
const inputKey = memory.inputKey ? memory.inputKey : 'input'
const prompt = ChatPromptTemplate.fromMessages([
['ai', systemMessage ? systemMessage : defaultMessage],
new MessagesPlaceholder(memoryKey),
['human', `{${inputKey}}`],
new MessagesPlaceholder('agent_scratchpad')
])
const modelWithFunctions = model.bind({
functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))]
})
const runnableAgent = RunnableSequence.from([
{
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
[memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
return messages ?? []
}
},
prompt,
modelWithFunctions,
new OpenAIFunctionsAgentOutputParser()
])
const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
tools,
sessionId: flowObj?.sessionId,
chatId: flowObj?.chatId,
input: flowObj?.input,
returnIntermediateSteps: true,
verbose: process.env.DEBUG === 'true' ? true : false
})
return executor
}
module.exports = { nodeClass: ConversationalRetrievalAgent_Agents }
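formatAgentSteps is imported from ../../../src/agents but not shown in this diff; it typically mirrors LangChain's formatForOpenAIFunctions helper, turning intermediate steps into messages for the agent_scratchpad placeholder. A sketch under that assumption:

import { AgentStep } from '@langchain/core/agents'
import { AIMessage, BaseMessage, FunctionMessage } from '@langchain/core/messages'

// Replay each tool call as an AI message followed by the tool's observation
const formatAgentStepsSketch = (steps: AgentStep[]): BaseMessage[] =>
    steps.flatMap(({ action, observation }) => [
        new AIMessage(action.log),
        new FunctionMessage(observation, action.tool)
    ])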

View File

[Image diff: 616 B before, 616 B after]

View File

@ -1 +0,0 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><circle cx="16" cy="16" r="14" fill="#CC9B7A"/><path d="m10 21 4.5-10L19 21m-7.2-2.857h5.4M18.5 11 23 21" stroke="#1F1F1E" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>

[Image removed: 269 B]

View File

@ -1,142 +0,0 @@
import { flatten } from 'lodash'
import { MessageContentTextDetail, ChatMessage, AnthropicAgent, Anthropic } from 'llamaindex'
import { getBaseClasses } from '../../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../../src/Interface'
class AnthropicAgent_LlamaIndex_Agents implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
sessionId?: string
badge?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'Anthropic Agent'
this.name = 'anthropicAgentLlamaIndex'
this.version = 1.0
this.type = 'AnthropicAgent'
this.category = 'Agents'
this.icon = 'Anthropic.svg'
this.description = `Agent that uses Anthropic Claude Function Calling to pick the tools and args to call using LlamaIndex`
this.baseClasses = [this.type, ...getBaseClasses(AnthropicAgent)]
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Tools',
name: 'tools',
type: 'Tool_LlamaIndex',
list: true
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'Anthropic Claude Model',
name: 'model',
type: 'BaseChatModel_LlamaIndex'
},
{
label: 'System Message',
name: 'systemMessage',
type: 'string',
rows: 4,
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const model = nodeData.inputs?.model as Anthropic
const systemMessage = nodeData.inputs?.systemMessage as string
const prependMessages = options?.prependMessages
let tools = nodeData.inputs?.tools
tools = flatten(tools)
const chatHistory = [] as ChatMessage[]
if (systemMessage) {
chatHistory.push({
content: systemMessage,
role: 'system'
})
}
const msgs = (await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]
for (const message of msgs) {
if (message.type === 'apiMessage') {
chatHistory.push({
content: message.message,
role: 'assistant'
})
} else if (message.type === 'userMessage') {
chatHistory.push({
content: message.message,
role: 'user'
})
}
}
const agent = new AnthropicAgent({
tools,
llm: model,
chatHistory: chatHistory,
verbose: process.env.DEBUG === 'true' ? true : false
})
let text = ''
const usedTools: IUsedTool[] = []
const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' ? true : false })
if (response.sources.length) {
for (const sourceTool of response.sources) {
usedTools.push({
tool: sourceTool.tool?.metadata.name ?? '',
toolInput: sourceTool.input,
toolOutput: sourceTool.output as any
})
}
}
if (Array.isArray(response.response.message.content) && response.response.message.content.length > 0) {
text = (response.response.message.content[0] as MessageContentTextDetail).text
} else {
text = response.response.message.content as string
}
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: text,
type: 'apiMessage'
}
],
this.sessionId
)
return usedTools.length ? { text: text, usedTools } : text
}
}
module.exports = { nodeClass: AnthropicAgent_LlamaIndex_Agents }

View File

@ -1,167 +0,0 @@
import { flatten } from 'lodash'
import { ChatMessage, OpenAI, OpenAIAgent } from 'llamaindex'
import { getBaseClasses } from '../../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../../src/Interface'
class OpenAIFunctionAgent_LlamaIndex_Agents implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
sessionId?: string
badge?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'OpenAI Tool Agent'
this.name = 'openAIToolAgentLlamaIndex'
this.version = 2.0
this.type = 'OpenAIToolAgent'
this.category = 'Agents'
this.icon = 'function.svg'
this.description = `Agent that uses OpenAI Function Calling to pick the tools and args to call using LlamaIndex`
this.baseClasses = [this.type, ...getBaseClasses(OpenAIAgent)]
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Tools',
name: 'tools',
type: 'Tool_LlamaIndex',
list: true
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'OpenAI/Azure Chat Model',
name: 'model',
type: 'BaseChatModel_LlamaIndex'
},
{
label: 'System Message',
name: 'systemMessage',
type: 'string',
rows: 4,
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const model = nodeData.inputs?.model as OpenAI
const systemMessage = nodeData.inputs?.systemMessage as string
let tools = nodeData.inputs?.tools
tools = flatten(tools)
const isStreamingEnabled = options.socketIO && options.socketIOClientId
const chatHistory = [] as ChatMessage[]
if (systemMessage) {
chatHistory.push({
content: systemMessage,
role: 'system'
})
}
const msgs = (await memory.getChatMessages(this.sessionId, false)) as IMessage[]
for (const message of msgs) {
if (message.type === 'apiMessage') {
chatHistory.push({
content: message.message,
role: 'assistant'
})
} else if (message.type === 'userMessage') {
chatHistory.push({
content: message.message,
role: 'user'
})
}
}
const agent = new OpenAIAgent({
tools,
llm: model,
chatHistory: chatHistory,
verbose: process.env.DEBUG === 'true' ? true : false
})
let text = ''
let isStreamingStarted = false
const usedTools: IUsedTool[] = []
if (isStreamingEnabled) {
const stream = await agent.chat({
message: input,
chatHistory,
stream: true,
verbose: process.env.DEBUG === 'true' ? true : false
})
for await (const chunk of stream) {
//console.log('chunk', chunk)
text += chunk.response.delta
if (!isStreamingStarted) {
isStreamingStarted = true
options.socketIO.to(options.socketIOClientId).emit('start', chunk.response.delta)
if (chunk.sources.length) {
for (const sourceTool of chunk.sources) {
usedTools.push({
tool: sourceTool.tool?.metadata.name ?? '',
toolInput: sourceTool.input,
toolOutput: sourceTool.output as any
})
}
options.socketIO.to(options.socketIOClientId).emit('usedTools', usedTools)
}
}
options.socketIO.to(options.socketIOClientId).emit('token', chunk.response.delta)
}
} else {
const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' ? true : false })
if (response.sources.length) {
for (const sourceTool of response.sources) {
usedTools.push({
tool: sourceTool.tool?.metadata.name ?? '',
toolInput: sourceTool.input,
toolOutput: sourceTool.output as any
})
}
}
text = response.response.message.content as string
}
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: text,
type: 'apiMessage'
}
],
this.sessionId
)
return usedTools.length ? { text: text, usedTools } : text
}
}
module.exports = { nodeClass: OpenAIFunctionAgent_LlamaIndex_Agents }
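The streaming branch above defines a small socket.io event vocabulary ('start', 'token', 'usedTools'); a client-side sketch of the matching listeners (the server URL and client setup are assumptions, the event names come from this diff):

import { io } from 'socket.io-client'

const socket = io('http://localhost:3000') // assumed Flowise server URL
socket.on('start', (token: string) => process.stdout.write(token)) // first streamed token
socket.on('token', (token: string) => process.stdout.write(token)) // subsequent tokens
socket.on('usedTools', (tools: unknown[]) => console.log('\nused tools:', tools))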

View File

@ -1,19 +1,19 @@
import { flatten } from 'lodash'
import { AgentExecutor } from 'langchain/agents'
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
import { pull } from 'langchain/hub'
import { Tool } from '@langchain/core/tools'
import type { PromptTemplate } from '@langchain/core/prompts'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { pull } from 'langchain/hub'
import { additionalCallbacks } from '../../../src/handler'
import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { createReactAgent } from '../../../src/agents'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { HumanMessage } from '@langchain/core/messages'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { ChatPromptTemplate, HumanMessagePromptTemplate } from 'langchain/prompts'
class ReActAgentChat_Agents implements INode {
class MRKLAgentChat_Agents implements INode {
label: string
name: string
version: number
@ -27,8 +27,8 @@ class ReActAgentChat_Agents implements INode {
constructor(fields?: { sessionId?: string }) {
this.label = 'ReAct Agent for Chat Models'
this.name = 'reactAgentChat'
this.version = 4.0
this.name = 'mrklAgentChat'
this.version = 3.0
this.type = 'AgentExecutor'
this.category = 'Agents'
this.icon = 'agent.svg'
@ -50,21 +50,6 @@ class ReActAgentChat_Agents implements INode {
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
},
{
label: 'Max Iterations',
name: 'maxIterations',
type: 'number',
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
@ -74,49 +59,32 @@ class ReActAgentChat_Agents implements INode {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const maxIterations = nodeData.inputs?.maxIterations as string
const model = nodeData.inputs?.model as BaseChatModel
let tools = nodeData.inputs?.tools as Tool[]
const moderations = nodeData.inputs?.inputModeration as Moderation[]
const prependMessages = options?.prependMessages
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the ReAct Agent for Chat Models
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
tools = flatten(tools)
const prompt = await pull<PromptTemplate>('hwchase17/react-chat')
let chatPromptTemplate = undefined
if (llmSupportsVision(model)) {
const visionChatModel = model as IVisionChatModal
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
if (model instanceof ChatOpenAI) {
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to vision supported
visionChatModel.setVisionModel()
const oldTemplate = prompt.template as string
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: oldTemplate
}
])
msg.inputVariables = prompt.inputVariables
chatPromptTemplate = ChatPromptTemplate.fromMessages([msg])
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
const oldTemplate = prompt.template as string
chatPromptTemplate = ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(oldTemplate)])
chatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
} else {
// revert to previous values if image upload is empty
visionChatModel.revertToOriginalModel()
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}
@ -129,13 +97,13 @@ class ReActAgentChat_Agents implements INode {
const executor = new AgentExecutor({
agent,
tools,
verbose: process.env.DEBUG === 'true',
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
verbose: process.env.DEBUG === 'true'
})
const callbacks = await additionalCallbacks(nodeData, options)
const chatHistory = ((await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]) ?? []
const prevChatHistory = options.chatHistory
const chatHistory = ((await memory.getChatMessages(this.sessionId, false, prevChatHistory)) as IMessage[]) ?? []
const chatHistoryString = chatHistory.map((hist) => hist.message).join('\\n')
const result = await executor.invoke({ input, chat_history: chatHistoryString }, { callbacks })
@ -158,4 +126,4 @@ class ReActAgentChat_Agents implements INode {
}
}
module.exports = { nodeClass: ReActAgentChat_Agents }
module.exports = { nodeClass: MRKLAgentChat_Agents }

View File

[Image diff: 616 B before, 616 B after]

View File

@ -3,15 +3,13 @@ import { AgentExecutor } from 'langchain/agents'
import { pull } from 'langchain/hub'
import { Tool } from '@langchain/core/tools'
import type { PromptTemplate } from '@langchain/core/prompts'
import { BaseLanguageModel } from '@langchain/core/language_models/base'
import { BaseLanguageModel } from 'langchain/base_language'
import { additionalCallbacks } from '../../../src/handler'
import { getBaseClasses } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { createReactAgent } from '../../../src/agents'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class ReActAgentLLM_Agents implements INode {
class MRKLAgentLLM_Agents implements INode {
label: string
name: string
version: number
@ -24,8 +22,8 @@ class ReActAgentLLM_Agents implements INode {
constructor() {
this.label = 'ReAct Agent for LLMs'
this.name = 'reactAgentLLM'
this.version = 2.0
this.name = 'mrklAgentLLM'
this.version = 1.0
this.type = 'AgentExecutor'
this.category = 'Agents'
this.icon = 'agent.svg'
@ -42,21 +40,6 @@ class ReActAgentLLM_Agents implements INode {
label: 'Language Model',
name: 'model',
type: 'BaseLanguageModel'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
},
{
label: 'Max Iterations',
name: 'maxIterations',
type: 'number',
optional: true,
additionalParams: true
}
]
}
@ -65,23 +48,9 @@ class ReActAgentLLM_Agents implements INode {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const model = nodeData.inputs?.model as BaseLanguageModel
const maxIterations = nodeData.inputs?.maxIterations as string
let tools = nodeData.inputs?.tools as Tool[]
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the ReAct Agent for LLMs
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
tools = flatten(tools)
const prompt = await pull<PromptTemplate>('hwchase17/react')
@ -95,8 +64,7 @@ class ReActAgentLLM_Agents implements INode {
const executor = new AgentExecutor({
agent,
tools,
verbose: process.env.DEBUG === 'true' ? true : false,
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
verbose: process.env.DEBUG === 'true' ? true : false
})
const callbacks = await additionalCallbacks(nodeData, options)
@ -107,4 +75,4 @@ class ReActAgentLLM_Agents implements INode {
}
}
module.exports = { nodeClass: ReActAgentLLM_Agents }
module.exports = { nodeClass: MRKLAgentLLM_Agents }

View File

@ -0,0 +1,7 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10 6C10 5.44772 10.4477 5 11 5H21C21.5523 5 22 5.44772 22 6V11C22 13.2091 20.2091 15 18 15H14C11.7909 15 10 13.2091 10 11V6Z" stroke="black" stroke-width="2" stroke-linejoin="round"/>
<path d="M16 5V3" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<circle cx="14" cy="9" r="1.5" fill="black"/>
<circle cx="18" cy="9" r="1.5" fill="black"/>
<path d="M26 27C26 22.0294 21.5228 18 16 18C10.4772 18 6 22.0294 6 27" stroke="black" stroke-width="2" stroke-linecap="round"/>
</svg>

[Image added: 616 B]

View File

@ -1,17 +1,16 @@
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOptionsValue, INodeParams, IUsedTool } from '../../../src/Interface'
import OpenAI from 'openai'
import { DataSource } from 'typeorm'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
import { getCredentialData, getCredentialParam, getUserHome } from '../../../src/utils'
import { MessageContentImageFile, MessageContentText } from 'openai/resources/beta/threads/messages/messages'
import * as fsDefault from 'node:fs'
import * as path from 'node:path'
import fetch from 'node-fetch'
import { flatten, uniqWith, isEqual } from 'lodash'
import { zodToJsonSchema } from 'zod-to-json-schema'
import { AnalyticHandler } from '../../../src/handler'
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { addSingleFileToStorage } from '../../../src/storageUtils'
const lenticularBracketRegex = /【[^】]*】/g
const imageRegex = /<img[^>]*\/>/g
class OpenAIAssistant_Agents implements INode {
label: string
@ -27,7 +26,7 @@ class OpenAIAssistant_Agents implements INode {
constructor() {
this.label = 'OpenAI Assistant'
this.name = 'openAIAssistant'
this.version = 4.0
this.version = 3.0
this.type = 'OpenAIAssistant'
this.category = 'Agents'
this.icon = 'assistant.svg'
@ -54,25 +53,6 @@ class OpenAIAssistant_Agents implements INode {
optional: true,
list: true
},
{
label: 'Tool Choice',
name: 'toolChoice',
type: 'string',
description:
'Controls which (if any) tool is called by the model. Can be "none", "auto", "required", or the name of a tool. Refer <a href="https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-tool_choice" target="_blank">here</a> for more information',
placeholder: 'file_search',
optional: true,
additionalParams: true
},
{
label: 'Parallel Tool Calls',
name: 'parallelToolCalls',
type: 'boolean',
description: 'Whether to enable parallel function calling during tool use. Defaults to true',
default: true,
optional: true,
additionalParams: true
},
{
label: 'Disable File Download',
name: 'disableFileDownload',
@ -157,14 +137,10 @@ class OpenAIAssistant_Agents implements INode {
const openai = new OpenAI({ apiKey: openAIApiKey })
options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
try {
if (sessionId && sessionId.startsWith('thread_')) {
await openai.beta.threads.del(sessionId)
options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
} else {
options.logger.error(`Error clearing OpenAI Thread ${sessionId}`)
}
if (sessionId) await openai.beta.threads.del(sessionId)
options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
} catch (e) {
options.logger.error(`Error clearing OpenAI Thread ${sessionId}`)
throw new Error(e)
}
}
@ -174,8 +150,6 @@ class OpenAIAssistant_Agents implements INode {
const databaseEntities = options.databaseEntities as IDatabaseEntity
const disableFileDownload = nodeData.inputs?.disableFileDownload as boolean
const moderations = nodeData.inputs?.inputModeration as Moderation[]
const _toolChoice = nodeData.inputs?.toolChoice as string
const parallelToolCalls = nodeData.inputs?.parallelToolCalls as boolean
const isStreaming = options.socketIO && options.socketIOClientId
const socketIO = isStreaming ? options.socketIO : undefined
const socketIOClientId = isStreaming ? options.socketIOClientId : ''
@ -194,9 +168,6 @@ class OpenAIAssistant_Agents implements INode {
tools = flatten(tools)
const formattedTools = tools?.map((tool: any) => formatToOpenAIAssistantTool(tool)) ?? []
const usedTools: IUsedTool[] = []
const fileAnnotations = []
const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
id: selectedAssistantId
})
@ -224,7 +195,7 @@ class OpenAIAssistant_Agents implements INode {
if (formattedTools.length) {
let filteredTools = []
for (const tool of retrievedAssistant.tools) {
if (tool.type === 'code_interpreter' || tool.type === 'file_search') filteredTools.push(tool)
if (tool.type === 'code_interpreter' || tool.type === 'retrieval') filteredTools.push(tool)
}
filteredTools = uniqWith([...filteredTools, ...formattedTools], isEqual)
// filter out tool with empty function
@ -265,8 +236,7 @@ class OpenAIAssistant_Agents implements INode {
(runStatus === 'cancelled' ||
runStatus === 'completed' ||
runStatus === 'expired' ||
runStatus === 'failed' ||
runStatus === 'requires_action')
runStatus === 'failed')
) {
clearInterval(timeout)
resolve()
@ -289,256 +259,11 @@ class OpenAIAssistant_Agents implements INode {
// Run assistant thread
const llmIds = await analyticHandlers.onLLMStart('ChatOpenAI', input, parentIds)
const runThread = await openai.beta.threads.runs.create(threadId, {
assistant_id: retrievedAssistant.id
})
let text = ''
let runThreadId = ''
let isStreamingStarted = false
let toolChoice: any
if (_toolChoice) {
if (_toolChoice === 'file_search') {
toolChoice = { type: 'file_search' }
} else if (_toolChoice === 'code_interpreter') {
toolChoice = { type: 'code_interpreter' }
} else if (_toolChoice === 'none' || _toolChoice === 'auto' || _toolChoice === 'required') {
toolChoice = _toolChoice
} else {
toolChoice = { type: 'function', function: { name: _toolChoice } }
}
}
if (isStreaming) {
const streamThread = await openai.beta.threads.runs.create(threadId, {
assistant_id: retrievedAssistant.id,
stream: true,
tool_choice: toolChoice,
parallel_tool_calls: parallelToolCalls
})
for await (const event of streamThread) {
if (event.event === 'thread.run.created') {
runThreadId = event.data.id
}
if (event.event === 'thread.message.delta') {
const chunk = event.data.delta.content?.[0]
if (chunk && 'text' in chunk) {
if (chunk.text?.annotations?.length) {
const message_content = chunk.text
const annotations = chunk.text?.annotations
// Iterate over the annotations
for (let index = 0; index < annotations.length; index++) {
const annotation = annotations[index]
let filePath = ''
// Gather citations based on annotation attributes
const file_citation = (annotation as OpenAI.Beta.Threads.Messages.FileCitationAnnotation).file_citation
if (file_citation) {
const cited_file = await openai.files.retrieve(file_citation.file_id)
// eslint-disable-next-line no-useless-escape
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
if (!disableFileDownload) {
filePath = await downloadFile(
openAIApiKey,
cited_file,
fileName,
options.chatflowid,
options.chatId
)
fileAnnotations.push({
filePath,
fileName
})
}
} else {
const file_path = (annotation as OpenAI.Beta.Threads.Messages.FilePathAnnotation).file_path
if (file_path) {
const cited_file = await openai.files.retrieve(file_path.file_id)
// eslint-disable-next-line no-useless-escape
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
if (!disableFileDownload) {
filePath = await downloadFile(
openAIApiKey,
cited_file,
fileName,
options.chatflowid,
options.chatId
)
fileAnnotations.push({
filePath,
fileName
})
}
}
}
// Replace the text with a footnote
message_content.value = message_content.value?.replace(
`${annotation.text}`,
`${disableFileDownload ? '' : filePath}`
)
}
// Remove lenticular brackets
message_content.value = message_content.value?.replace(lenticularBracketRegex, '')
text += message_content.value ?? ''
if (message_content.value) {
if (!isStreamingStarted) {
isStreamingStarted = true
socketIO.to(socketIOClientId).emit('start', message_content.value)
}
socketIO.to(socketIOClientId).emit('token', message_content.value)
}
if (fileAnnotations.length) {
if (!isStreamingStarted) {
isStreamingStarted = true
socketIO.to(socketIOClientId).emit('start', '')
}
socketIO.to(socketIOClientId).emit('fileAnnotations', fileAnnotations)
}
} else {
text += chunk.text?.value
if (!isStreamingStarted) {
isStreamingStarted = true
socketIO.to(socketIOClientId).emit('start', chunk.text?.value)
}
socketIO.to(socketIOClientId).emit('token', chunk.text?.value)
}
}
if (chunk && 'image_file' in chunk && chunk.image_file?.file_id) {
const fileId = chunk.image_file.file_id
const fileObj = await openai.files.retrieve(fileId)
const buffer = await downloadImg(openai, fileId, `${fileObj.filename}.png`, options.chatflowid, options.chatId)
const base64String = Buffer.from(buffer).toString('base64')
// TODO: Use a file path and retrieve image on the fly. Storing as base64 to localStorage and database will easily hit limits
const imgHTML = `<img src="data:image/png;base64,${base64String}" width="100%" height="max-content" alt="${fileObj.filename}" /><br/>`
text += imgHTML
if (!isStreamingStarted) {
isStreamingStarted = true
socketIO.to(socketIOClientId).emit('start', imgHTML)
}
socketIO.to(socketIOClientId).emit('token', imgHTML)
}
}
if (event.event === 'thread.run.requires_action') {
if (event.data.required_action?.submit_tool_outputs.tool_calls) {
const actions: ICommonObject[] = []
event.data.required_action.submit_tool_outputs.tool_calls.forEach((item) => {
const functionCall = item.function
let args = {}
try {
args = JSON.parse(functionCall.arguments)
} catch (e) {
console.error('Error parsing arguments, default to empty object')
}
actions.push({
tool: functionCall.name,
toolInput: args,
toolCallId: item.id
})
})
const submitToolOutputs = []
for (let i = 0; i < actions.length; i += 1) {
const tool = tools.find((tool: any) => tool.name === actions[i].tool)
if (!tool) continue
// Start tool analytics
const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds)
try {
const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
sessionId: threadId,
chatId: options.chatId,
input
})
await analyticHandlers.onToolEnd(toolIds, toolOutput)
submitToolOutputs.push({
tool_call_id: actions[i].toolCallId,
output: toolOutput
})
usedTools.push({
tool: tool.name,
toolInput: actions[i].toolInput,
toolOutput
})
} catch (e) {
await analyticHandlers.onToolEnd(toolIds, e)
console.error('Error executing tool', e)
throw new Error(
`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`
)
}
}
try {
const stream = openai.beta.threads.runs.submitToolOutputsStream(threadId, runThreadId, {
tool_outputs: submitToolOutputs
})
for await (const event of stream) {
if (event.event === 'thread.message.delta') {
const chunk = event.data.delta.content?.[0]
if (chunk && 'text' in chunk && chunk.text?.value) {
text += chunk.text.value
if (!isStreamingStarted) {
isStreamingStarted = true
socketIO.to(socketIOClientId).emit('start', chunk.text.value)
}
socketIO.to(socketIOClientId).emit('token', chunk.text.value)
}
}
}
socketIO.to(socketIOClientId).emit('usedTools', usedTools)
} catch (error) {
console.error('Error submitting tool outputs:', error)
await openai.beta.threads.runs.cancel(threadId, runThreadId)
const errMsg = `Error submitting tool outputs. Thread ID: ${threadId}. Run ID: ${runThreadId}`
await analyticHandlers.onLLMError(llmIds, errMsg)
await analyticHandlers.onChainError(parentIds, errMsg, true)
throw new Error(errMsg)
}
}
}
}
// List messages
const messages = await openai.beta.threads.messages.list(threadId)
const messageData = messages.data ?? []
const assistantMessages = messageData.filter((msg) => msg.role === 'assistant')
if (!assistantMessages.length) return ''
// Remove images from the logging text
let llmOutput = text.replace(imageRegex, '')
llmOutput = llmOutput.replace('<br/>', '')
await analyticHandlers.onLLMEnd(llmIds, llmOutput)
await analyticHandlers.onChainEnd(parentIds, messageData, true)
return {
text,
usedTools,
fileAnnotations,
assistant: { assistantId: openAIAssistantId, threadId, runId: runThreadId, messages: messageData }
}
}
const usedTools: IUsedTool[] = []
const promise = (threadId: string, runId: string) => {
return new Promise((resolve, reject) => {
@ -574,7 +299,8 @@ class OpenAIAssistant_Agents implements INode {
// Start tool analytics
const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds)
if (socketIO && socketIOClientId) socketIO.to(socketIOClientId).emit('tool', tool.name)
if (options.socketIO && options.socketIOClientId)
options.socketIO.to(options.socketIOClientId).emit('tool', tool.name)
try {
const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
@ -634,12 +360,7 @@ class OpenAIAssistant_Agents implements INode {
}
// Polling run status
const runThread = await openai.beta.threads.runs.create(threadId, {
assistant_id: retrievedAssistant.id,
tool_choice: toolChoice,
parallel_tool_calls: parallelToolCalls
})
runThreadId = runThread.id
let runThreadId = runThread.id
let state = await promise(threadId, runThread.id)
while (state === 'requires_action') {
state = await promise(threadId, runThread.id)
@ -650,9 +371,7 @@ class OpenAIAssistant_Agents implements INode {
if (retries > 0) {
retries -= 1
const newRunThread = await openai.beta.threads.runs.create(threadId, {
assistant_id: retrievedAssistant.id,
tool_choice: toolChoice,
parallel_tool_calls: parallelToolCalls
assistant_id: retrievedAssistant.id
})
runThreadId = newRunThread.id
state = await promise(threadId, newRunThread.id)
@ -670,47 +389,46 @@ class OpenAIAssistant_Agents implements INode {
if (!assistantMessages.length) return ''
let returnVal = ''
const fileAnnotations = []
for (let i = 0; i < assistantMessages[0].content.length; i += 1) {
if (assistantMessages[0].content[i].type === 'text') {
const content = assistantMessages[0].content[i] as OpenAI.Beta.Threads.Messages.TextContentBlock
const content = assistantMessages[0].content[i] as MessageContentText
if (content.text.annotations) {
const message_content = content.text
const annotations = message_content.annotations
const dirPath = path.join(getUserHome(), '.flowise', 'openai-assistant')
// Iterate over the annotations
for (let index = 0; index < annotations.length; index++) {
const annotation = annotations[index]
let filePath = ''
// Gather citations based on annotation attributes
const file_citation = (annotation as OpenAI.Beta.Threads.Messages.FileCitationAnnotation).file_citation
const file_citation = (annotation as OpenAI.Beta.Threads.Messages.MessageContentText.Text.FileCitation)
.file_citation
if (file_citation) {
const cited_file = await openai.files.retrieve(file_citation.file_id)
// eslint-disable-next-line no-useless-escape
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', fileName)
if (!disableFileDownload) {
filePath = await downloadFile(openAIApiKey, cited_file, fileName, options.chatflowid, options.chatId)
await downloadFile(cited_file, filePath, dirPath, openAIApiKey)
fileAnnotations.push({
filePath,
fileName
})
}
} else {
const file_path = (annotation as OpenAI.Beta.Threads.Messages.FilePathAnnotation).file_path
const file_path = (annotation as OpenAI.Beta.Threads.Messages.MessageContentText.Text.FilePath).file_path
if (file_path) {
const cited_file = await openai.files.retrieve(file_path.file_id)
// eslint-disable-next-line no-useless-escape
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', fileName)
if (!disableFileDownload) {
filePath = await downloadFile(
openAIApiKey,
cited_file,
fileName,
options.chatflowid,
options.chatId
)
await downloadFile(cited_file, filePath, dirPath, openAIApiKey)
fileAnnotations.push({
filePath,
fileName
@ -731,14 +449,19 @@ class OpenAIAssistant_Agents implements INode {
returnVal += content.text.value
}
const lenticularBracketRegex = /【[^】]*】/g
returnVal = returnVal.replace(lenticularBracketRegex, '')
} else {
const content = assistantMessages[0].content[i] as OpenAI.Beta.Threads.Messages.ImageFileContentBlock
const content = assistantMessages[0].content[i] as MessageContentImageFile
const fileId = content.image_file.file_id
const fileObj = await openai.files.retrieve(fileId)
const dirPath = path.join(getUserHome(), '.flowise', 'openai-assistant')
const filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', `${fileObj.filename}.png`)
const buffer = await downloadImg(openai, fileId, `${fileObj.filename}.png`, options.chatflowid, options.chatId)
const base64String = Buffer.from(buffer).toString('base64')
await downloadImg(openai, fileId, filePath, dirPath)
const bitmap = fsDefault.readFileSync(filePath)
const base64String = Buffer.from(bitmap).toString('base64')
// TODO: Use a file path and retrieve image on the fly. Storing as base64 to localStorage and database will easily hit limits
const imgHTML = `<img src="data:image/png;base64,${base64String}" width="100%" height="max-content" alt="${fileObj.filename}" /><br/>`
@ -746,6 +469,7 @@ class OpenAIAssistant_Agents implements INode {
}
}
const imageRegex = /<img[^>]*\/>/g
let llmOutput = returnVal.replace(imageRegex, '')
llmOutput = llmOutput.replace('<br/>', '')
@ -765,7 +489,7 @@ class OpenAIAssistant_Agents implements INode {
}
}
const downloadImg = async (openai: OpenAI, fileId: string, fileName: string, ...paths: string[]) => {
const downloadImg = async (openai: OpenAI, fileId: string, filePath: string, dirPath: string) => {
const response = await openai.files.content(fileId)
// Extract the binary data from the Response object
@ -773,14 +497,15 @@ const downloadImg = async (openai: OpenAI, fileId: string, fileName: string, ...
// Convert the binary data to a Buffer
const image_data_buffer = Buffer.from(image_data)
const mime = 'image/png'
await addSingleFileToStorage(mime, image_data_buffer, fileName, ...paths)
return image_data_buffer
// Save the image to a specific location
if (!fsDefault.existsSync(dirPath)) {
fsDefault.mkdirSync(path.dirname(filePath), { recursive: true })
}
fsDefault.writeFileSync(filePath, image_data_buffer)
}
const downloadFile = async (openAIApiKey: string, fileObj: any, fileName: string, ...paths: string[]) => {
const downloadFile = async (fileObj: any, filePath: string, dirPath: string, openAIApiKey: string) => {
try {
const response = await fetch(`https://api.openai.com/v1/files/${fileObj.id}/content`, {
method: 'GET',
@ -791,21 +516,24 @@ const downloadFile = async (openAIApiKey: string, fileObj: any, fileName: string
throw new Error(`HTTP error! status: ${response.status}`)
}
// Extract the binary data from the Response object
const data = await response.arrayBuffer()
await new Promise<void>((resolve, reject) => {
if (!fsDefault.existsSync(dirPath)) {
fsDefault.mkdirSync(path.dirname(filePath), { recursive: true })
}
const dest = fsDefault.createWriteStream(filePath)
response.body.pipe(dest)
response.body.on('end', () => resolve())
dest.on('error', reject)
})
// Convert the binary data to a Buffer
const data_buffer = Buffer.from(data)
const mime = 'application/octet-stream'
return await addSingleFileToStorage(mime, data_buffer, fileName, ...paths)
// eslint-disable-next-line no-console
console.log('File downloaded and written to', filePath)
} catch (error) {
console.error('Error downloading or writing the file:', error)
return ''
}
}
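// Convert a Flowise tool definition into OpenAI's function-tool schema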
const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.FunctionTool => {
const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.AssistantCreateParams.AssistantToolsFunction => {
return {
type: 'function',
function: {

View File

@ -0,0 +1,160 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI, formatToOpenAIFunction } from '@langchain/openai'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
class OpenAIFunctionAgent_Agents implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
inputs: INodeParams[]
sessionId?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'OpenAI Function Agent'
this.name = 'openAIFunctionAgent'
this.version = 3.0
this.type = 'AgentExecutor'
this.category = 'Agents'
this.icon = 'function.svg'
this.description = `An agent that uses Function Calling to pick the tool and args to call`
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
this.inputs = [
{
label: 'Allowed Tools',
name: 'tools',
type: 'Tool',
list: true
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'OpenAI/Azure Chat Model',
name: 'model',
type: 'BaseChatModel'
},
{
label: 'System Message',
name: 'systemMessage',
type: 'string',
rows: 4,
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
}
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
let res: ChainValues = {}
let sourceDocuments: ICommonObject[] = []
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
if (res.sourceDocuments) {
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
sourceDocuments = res.sourceDocuments
}
} else {
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
if (res.sourceDocuments) {
sourceDocuments = res.sourceDocuments
}
}
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: res?.output,
type: 'apiMessage'
}
],
this.sessionId
)
return sourceDocuments.length ? { text: res?.output, sourceDocuments: flatten(sourceDocuments) } : res?.output
}
}
const prepareAgent = (
nodeData: INodeData,
flowObj: { sessionId?: string; chatId?: string; input?: string },
chatHistory: IMessage[] = []
) => {
const model = nodeData.inputs?.model as ChatOpenAI
const memory = nodeData.inputs?.memory as FlowiseMemory
const systemMessage = nodeData.inputs?.systemMessage as string
let tools = nodeData.inputs?.tools
tools = flatten(tools)
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
const inputKey = memory.inputKey ? memory.inputKey : 'input'
const prompt = ChatPromptTemplate.fromMessages([
['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
new MessagesPlaceholder(memoryKey),
['human', `{${inputKey}}`],
new MessagesPlaceholder('agent_scratchpad')
])
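// Bind the tool schemas to the model so it can return a function call with arguments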
const modelWithFunctions = model.bind({
functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))]
})
const runnableAgent = RunnableSequence.from([
{
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
[memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
return messages ?? []
}
},
prompt,
modelWithFunctions,
new OpenAIFunctionsAgentOutputParser()
])
const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
tools,
sessionId: flowObj?.sessionId,
chatId: flowObj?.chatId,
input: flowObj?.input,
verbose: process.env.DEBUG === 'true' ? true : false
})
return executor
}
module.exports = { nodeClass: OpenAIFunctionAgent_Agents }

View File

@ -1,264 +0,0 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { RunnableSequence } from '@langchain/core/runnables'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool, IVisionChatModal } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents'
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
class ToolAgent_Agents implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
inputs: INodeParams[]
sessionId?: string
badge?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'Tool Agent'
this.name = 'toolAgent'
this.version = 1.0
this.type = 'AgentExecutor'
this.category = 'Agents'
this.icon = 'toolAgent.png'
this.description = `Agent that uses Function Calling to pick the tools and args to call`
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
this.inputs = [
{
label: 'Tools',
name: 'tools',
type: 'Tool',
list: true
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'Tool Calling Chat Model',
name: 'model',
type: 'BaseChatModel',
description:
'Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat'
},
{
label: 'System Message',
name: 'systemMessage',
type: 'string',
default: `You are a helpful AI assistant.`,
rows: 4,
optional: true,
additionalParams: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
},
{
label: 'Max Iterations',
name: 'maxIterations',
type: 'number',
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
}
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
const isStreamable = options.socketIO && options.socketIOClientId
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the OpenAI Function Agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
if (isStreamable)
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
let res: ChainValues = {}
let sourceDocuments: ICommonObject[] = []
let usedTools: IUsedTool[] = []
if (isStreamable) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
if (res.sourceDocuments) {
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
sourceDocuments = res.sourceDocuments
}
if (res.usedTools) {
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
usedTools = res.usedTools
}
} else {
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
if (res.sourceDocuments) {
sourceDocuments = res.sourceDocuments
}
if (res.usedTools) {
usedTools = res.usedTools
}
}
let output = res?.output as string
// Claude 3 Opus tends to spit out <thinking>..</thinking> as well, discard that in final output
const regexPattern: RegExp = /<thinking>[\s\S]*?<\/thinking>/
const matches: RegExpMatchArray | null = output.match(regexPattern)
if (matches) {
for (const match of matches) {
output = output.replace(match, '')
}
}
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: output,
type: 'apiMessage'
}
],
this.sessionId
)
let finalRes = output
if (sourceDocuments.length || usedTools.length) {
const finalRes: ICommonObject = { text: output }
if (sourceDocuments.length) {
finalRes.sourceDocuments = flatten(sourceDocuments)
}
if (usedTools.length) {
finalRes.usedTools = usedTools
}
return finalRes
}
return finalRes
}
}
const prepareAgent = async (
nodeData: INodeData,
options: ICommonObject,
flowObj: { sessionId?: string; chatId?: string; input?: string }
) => {
const model = nodeData.inputs?.model as BaseChatModel
const maxIterations = nodeData.inputs?.maxIterations as string
const memory = nodeData.inputs?.memory as FlowiseMemory
const systemMessage = nodeData.inputs?.systemMessage as string
let tools = nodeData.inputs?.tools
tools = flatten(tools)
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
const inputKey = memory.inputKey ? memory.inputKey : 'input'
const prependMessages = options?.prependMessages
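// Assemble the agent prompt: system message, prior chat history, the user input, then the agent scratchpad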
const prompt = ChatPromptTemplate.fromMessages([
['system', systemMessage],
new MessagesPlaceholder(memoryKey),
['human', `{${inputKey}}`],
new MessagesPlaceholder('agent_scratchpad')
])
if (llmSupportsVision(model)) {
const visionChatModel = model as IVisionChatModal
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
visionChatModel.setVisionModel()
// Pop the `agent_scratchpad` MessagePlaceHolder
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
const template = (lastMessage.prompt as PromptTemplate).template as string
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: template
}
])
msg.inputVariables = lastMessage.inputVariables
prompt.promptMessages.push(msg)
}
// Add the `agent_scratchpad` MessagePlaceHolder back
prompt.promptMessages.push(messagePlaceholder)
} else {
visionChatModel.revertToOriginalModel()
}
}
if (model.bindTools === undefined) {
throw new Error(`This agent requires that the "bindTools()" method be implemented on the input model.`)
}
const modelWithTools = model.bindTools(tools)
const runnableAgent = RunnableSequence.from([
{
[inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(i.steps),
[memoryKey]: async (_: { input: string; steps: ToolsAgentStep[] }) => {
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
return messages ?? []
}
},
prompt,
modelWithTools,
new ToolCallingAgentOutputParser()
])
const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
tools,
sessionId: flowObj?.sessionId,
chatId: flowObj?.chatId,
input: flowObj?.input,
verbose: process.env.DEBUG === 'true' ? true : false,
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
})
return executor
}
module.exports = { nodeClass: ToolAgent_Agents }

Binary file not shown.


View File

@ -1,255 +0,0 @@
import { flatten } from 'lodash'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { RunnableSequence } from '@langchain/core/runnables'
import { Tool } from '@langchain/core/tools'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { formatLogToMessage } from 'langchain/agents/format_scratchpad/log_to_message'
import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, XMLAgentOutputParser } from '../../../src/agents'
import { Moderation, checkInputs } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
const defaultSystemMessage = `You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
<final_answer>The weather in SF is 64 degrees</final_answer>
Begin!
Previous Conversation:
{chat_history}
Question: {input}
{agent_scratchpad}`
class XMLAgent_Agents implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
inputs: INodeParams[]
sessionId?: string
badge?: string
constructor(fields?: { sessionId?: string }) {
this.label = 'XML Agent'
this.name = 'xmlAgent'
this.version = 2.0
this.type = 'XMLAgent'
this.category = 'Agents'
this.icon = 'xmlagent.svg'
this.description = `Agent that is designed for LLMs that are good for reasoning/writing XML (e.g: Anthropic Claude)`
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
this.inputs = [
{
label: 'Tools',
name: 'tools',
type: 'Tool',
list: true
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'Chat Model',
name: 'model',
type: 'BaseChatModel'
},
{
label: 'System Message',
name: 'systemMessage',
type: 'string',
warning: 'Prompt must include input variables: {tools}, {chat_history}, {input} and {agent_scratchpad}',
rows: 4,
default: defaultSystemMessage,
additionalParams: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
},
{
label: 'Max Iterations',
name: 'maxIterations',
type: 'number',
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
}
async init(): Promise<any> {
return null
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the OpenAI Function Agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
let res: ChainValues = {}
let sourceDocuments: ICommonObject[] = []
let usedTools: IUsedTool[] = []
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
if (res.sourceDocuments) {
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
sourceDocuments = res.sourceDocuments
}
if (res.usedTools) {
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
usedTools = res.usedTools
}
} else {
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
if (res.sourceDocuments) {
sourceDocuments = res.sourceDocuments
}
if (res.usedTools) {
usedTools = res.usedTools
}
}
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: res?.output,
type: 'apiMessage'
}
],
this.sessionId
)
let finalRes = res?.output
if (sourceDocuments.length || usedTools.length) {
finalRes = { text: res?.output }
if (sourceDocuments.length) {
finalRes.sourceDocuments = flatten(sourceDocuments)
}
if (usedTools.length) {
finalRes.usedTools = usedTools
}
return finalRes
}
return finalRes
}
}
const prepareAgent = async (
nodeData: INodeData,
options: ICommonObject,
flowObj: { sessionId?: string; chatId?: string; input?: string }
) => {
const model = nodeData.inputs?.model as BaseChatModel
const maxIterations = nodeData.inputs?.maxIterations as string
const memory = nodeData.inputs?.memory as FlowiseMemory
const systemMessage = nodeData.inputs?.systemMessage as string
let tools = nodeData.inputs?.tools
tools = flatten(tools)
const inputKey = memory.inputKey ? memory.inputKey : 'input'
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
const prependMessages = options?.prependMessages
let promptMessage = systemMessage ? systemMessage : defaultSystemMessage
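// Rename the default {chat_history} and {input} placeholders to match the memory's configured keys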
if (memory.memoryKey) promptMessage = promptMessage.replaceAll('{chat_history}', `{${memory.memoryKey}}`)
if (memory.inputKey) promptMessage = promptMessage.replaceAll('{input}', `{${memory.inputKey}}`)
const prompt = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(promptMessage),
new MessagesPlaceholder('agent_scratchpad')
])
const missingVariables = ['tools', 'agent_scratchpad'].filter((v) => !prompt.inputVariables.includes(v))
if (missingVariables.length > 0) {
throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`)
}
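// Stop generation as soon as the model closes a tool input or emits its final answer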
const llmWithStop = model.bind({ stop: ['</tool_input>', '</final_answer>'] })
const messages = (await memory.getChatMessages(flowObj.sessionId, false, prependMessages)) as IMessage[]
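// Flatten prior turns into a plain-text transcript, since the XML prompt expects {chat_history} as a string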
let chatHistoryMsgTxt = ''
for (const message of messages) {
if (message.type === 'apiMessage') {
chatHistoryMsgTxt += `\\nAI:${message.message}`
} else if (message.type === 'userMessage') {
chatHistoryMsgTxt += `\\nHuman:${message.message}`
}
}
const runnableAgent = RunnableSequence.from([
{
[inputKey]: (i: { input: string; tools: Tool[]; steps: AgentStep[] }) => i.input,
agent_scratchpad: (i: { input: string; tools: Tool[]; steps: AgentStep[] }) => formatLogToMessage(i.steps),
tools: (_: { input: string; tools: Tool[]; steps: AgentStep[] }) =>
tools.map((tool: Tool) => `${tool.name}: ${tool.description}`),
[memoryKey]: (_: { input: string; tools: Tool[]; steps: AgentStep[] }) => chatHistoryMsgTxt
},
prompt,
llmWithStop,
new XMLAgentOutputParser()
])
const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
tools,
sessionId: flowObj?.sessionId,
chatId: flowObj?.chatId,
input: flowObj?.input,
isXML: true,
verbose: process.env.DEBUG === 'true' ? true : false,
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
})
return executor
}
module.exports = { nodeClass: XMLAgent_Agents }

View File

@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-file-type-xml" width="24" height="24" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M14 3v4a1 1 0 0 0 1 1h4" /><path d="M5 12v-7a2 2 0 0 1 2 -2h7l5 5v4" /><path d="M4 15l4 6" /><path d="M4 21l4 -6" /><path d="M19 15v6h3" /><path d="M11 21v-6l2.5 3l2.5 -3v6" /></svg>


View File

@ -1,3 +0,0 @@
<svg width="38" height="52" viewBox="0 0 38 52" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M0 12.383V41.035C0 41.392 0.190002 41.723 0.500002 41.901L17.095 51.481C17.25 51.571 17.422 51.616 17.595 51.616C17.768 51.616 17.94 51.571 18.095 51.481L37.279 40.409C37.589 40.23 37.779 39.9 37.779 39.543V10.887C37.779 10.53 37.589 10.199 37.279 10.021L31.168 6.49498C31.014 6.40598 30.841 6.36098 30.669 6.36098C30.496 6.36098 30.323 6.40498 30.169 6.49498L27.295 8.15398V4.83698C27.295 4.47998 27.105 4.14898 26.795 3.97098L20.684 0.441982C20.529 0.352982 20.357 0.307983 20.184 0.307983C20.011 0.307983 19.839 0.352982 19.684 0.441982L13.781 3.85098C13.471 4.02998 13.281 4.35998 13.281 4.71698V12.157L12.921 12.365V11.872C12.921 11.515 12.731 11.185 12.421 11.006L7.405 8.10698C7.25 8.01798 7.077 7.97298 6.905 7.97298C6.733 7.97298 6.56 8.01798 6.405 8.10698L0.501001 11.517C0.191001 11.695 0 12.025 0 12.383ZM1.5 13.248L5.519 15.566V23.294C5.519 23.304 5.524 23.313 5.525 23.323C5.526 23.345 5.529 23.366 5.534 23.388C5.538 23.411 5.544 23.433 5.552 23.455C5.559 23.476 5.567 23.496 5.577 23.516C5.582 23.525 5.581 23.535 5.587 23.544C5.591 23.551 5.6 23.554 5.604 23.561C5.617 23.581 5.63 23.6 5.646 23.618C5.669 23.644 5.695 23.665 5.724 23.686C5.741 23.698 5.751 23.716 5.77 23.727L11.236 26.886C11.243 26.89 11.252 26.888 11.26 26.892C11.328 26.927 11.402 26.952 11.484 26.952C11.566 26.952 11.641 26.928 11.709 26.893C11.728 26.883 11.743 26.87 11.761 26.858C11.812 26.823 11.855 26.781 11.89 26.731C11.898 26.719 11.911 26.715 11.919 26.702C11.924 26.693 11.924 26.682 11.929 26.674C11.944 26.644 11.951 26.613 11.96 26.58C11.969 26.547 11.978 26.515 11.98 26.481C11.98 26.471 11.986 26.462 11.986 26.452V20.138V19.302L17.096 22.251V49.749L1.5 40.747V13.248ZM35.778 10.887L30.879 13.718L25.768 10.766L26.544 10.317L30.668 7.93698L35.778 10.887ZM25.293 4.83598L20.391 7.66498L15.281 4.71598L20.183 1.88398L25.293 4.83598ZM10.92 11.872L6.019 14.701L2.001 12.383L6.904 9.55098L10.92 11.872ZM20.956 16.51L24.268 14.601V18.788C24.268 18.809 24.278 18.827 24.28 18.848C24.284 18.883 24.29 18.917 24.301 18.95C24.311 18.98 24.325 19.007 24.342 19.034C24.358 19.061 24.373 19.088 24.395 19.112C24.417 19.138 24.444 19.159 24.471 19.18C24.489 19.193 24.499 19.21 24.518 19.221L29.878 22.314L23.998 25.708V18.557C23.998 18.547 23.993 18.538 23.992 18.528C23.991 18.506 23.988 18.485 23.984 18.463C23.979 18.44 23.973 18.418 23.965 18.396C23.958 18.375 23.95 18.355 23.941 18.336C23.936 18.327 23.937 18.316 23.931 18.308C23.925 18.299 23.917 18.294 23.911 18.286C23.898 18.267 23.886 18.251 23.871 18.234C23.855 18.216 23.84 18.2 23.822 18.185C23.805 18.17 23.788 18.157 23.769 18.144C23.76 18.138 23.756 18.129 23.747 18.124L20.956 16.51ZM25.268 11.633L30.379 14.585V21.448L25.268 18.499V13.736V11.633ZM12.486 18.437L17.389 15.604L22.498 18.556L17.595 21.385L12.486 18.437ZM10.985 25.587L7.019 23.295L10.985 21.005V25.587ZM12.42 14.385L14.28 13.311L16.822 14.777L12.42 17.32V14.385ZM14.78 5.58198L19.891 8.53098V15.394L14.78 12.445V5.58198Z" fill="#213B41"/>
</svg>


View File

@ -1,33 +0,0 @@
import { INode, INodeParams } from '../../../src/Interface'
class LangWatch_Analytic implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
inputs?: INodeParams[]
credential: INodeParams
constructor() {
this.label = 'LangWatch'
this.name = 'LangWatch'
this.version = 1.0
this.type = 'LangWatch'
this.icon = 'LangWatch.svg'
this.category = 'Analytic'
this.baseClasses = [this.type]
this.inputs = []
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['langwatchApi']
}
}
}
module.exports = { nodeClass: LangWatch_Analytic }

View File

@ -3,9 +3,6 @@ import { APIChain, createOpenAPIChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { getFileFromStorage } from '../../../src'
class OpenApiChain_Chains implements INode {
label: string
@ -21,7 +18,7 @@ class OpenApiChain_Chains implements INode {
constructor() {
this.label = 'OpenAPI Chain'
this.name = 'openApiChain'
this.version = 2.0
this.version = 1.0
this.type = 'OpenAPIChain'
this.icon = 'openapi.svg'
this.category = 'Chains'
@ -53,37 +50,19 @@ class OpenApiChain_Chains implements INode {
type: 'json',
additionalParams: true,
optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return await initChain(nodeData, options)
async init(nodeData: INodeData): Promise<any> {
return await initChain(nodeData)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const chain = await initChain(nodeData, options)
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const chain = await initChain(nodeData)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the OpenAPI chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
const res = await chain.run(input, [loggerHandler, handler, ...callbacks])
@ -95,7 +74,7 @@ class OpenApiChain_Chains implements INode {
}
}
const initChain = async (nodeData: INodeData, options: ICommonObject) => {
const initChain = async (nodeData: INodeData) => {
const model = nodeData.inputs?.model as ChatOpenAI
const headers = nodeData.inputs?.headers as string
const yamlLink = nodeData.inputs?.yamlLink as string
@ -106,17 +85,10 @@ const initChain = async (nodeData: INodeData, options: ICommonObject) => {
if (yamlLink) {
yamlString = yamlLink
} else {
if (yamlFileBase64.startsWith('FILE-STORAGE::')) {
const file = yamlFileBase64.replace('FILE-STORAGE::', '')
const chatflowid = options.chatflowid
const fileData = await getFileFromStorage(file, chatflowid)
yamlString = fileData.toString()
} else {
const splitDataURI = yamlFileBase64.split(',')
splitDataURI.pop()
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
yamlString = bf.toString('utf-8')
}
const splitDataURI = yamlFileBase64.split(',')
splitDataURI.pop()
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
yamlString = bf.toString('utf-8')
}
return await createOpenAPIChain(yamlString, {

View File

@ -1,30 +1,14 @@
import { ConversationChain } from 'langchain/chains'
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
BaseMessagePromptTemplateLike,
PromptTemplate
} from '@langchain/core/prompts'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { StringOutputParser } from '@langchain/core/output_parsers'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { HumanMessage } from '@langchain/core/messages'
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import {
IVisionChatModal,
FlowiseMemory,
ICommonObject,
INode,
INodeData,
INodeParams,
MessageContentImageUrl
} from '../../../src/Interface'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
@ -111,7 +95,7 @@ class ConversationChain_Chains implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const memory = nodeData.inputs?.memory
const chain = await prepareChain(nodeData, options, this.sessionId)
const chain = prepareChain(nodeData, options, this.sessionId)
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
@ -161,32 +145,16 @@ class ConversationChain_Chains implements INode {
}
}
const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageContentImageUrl[]) => {
const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => {
const memory = nodeData.inputs?.memory as FlowiseMemory
const prompt = nodeData.inputs?.systemMessagePrompt as string
const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate
let model = nodeData.inputs?.model as BaseChatModel
if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) {
const sysPrompt = chatPromptTemplate.promptMessages[0]
const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt]
// OpenAI works better when separate images into standalone human messages
if (model instanceof ChatOpenAI && humanImageMessages.length) {
messages.push(new HumanMessage({ content: [...humanImageMessages] }))
} else if (humanImageMessages.length) {
const lastMessage = messages.pop() as HumanMessagePromptTemplate
const template = (lastMessage.prompt as PromptTemplate).template as string
const msg = HumanMessagePromptTemplate.fromTemplate([
...humanImageMessages,
{
text: template
}
])
msg.inputVariables = lastMessage.inputVariables
messages.push(msg)
}
if (humanImageMessages.length) messages.push(...humanImageMessages)
const chatPrompt = ChatPromptTemplate.fromMessages(messages)
if ((chatPromptTemplate as any).promptValues) {
@ -197,44 +165,46 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageConte
return chatPrompt
}
const messages: BaseMessagePromptTemplateLike[] = [
const messages = [
SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
]
// OpenAI works better when separate images into standalone human messages
if (model instanceof ChatOpenAI && humanImageMessages.length) {
messages.push(new HumanMessage({ content: [...humanImageMessages] }))
} else if (humanImageMessages.length) {
messages.pop()
messages.push(HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages]))
}
if (humanImageMessages.length) messages.push(...(humanImageMessages as any[]))
const chatPrompt = ChatPromptTemplate.fromMessages(messages)
return chatPrompt
}
const prepareChain = async (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
let model = nodeData.inputs?.model as BaseChatModel
const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
const chatHistory = options.chatHistory
let model = nodeData.inputs?.model as ChatOpenAI
const memory = nodeData.inputs?.memory as FlowiseMemory
const memoryKey = memory.memoryKey ?? 'chat_history'
const prependMessages = options?.prependMessages
let messageContent: MessageContentImageUrl[] = []
if (llmSupportsVision(model)) {
messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
const visionChatModel = model as IVisionChatModal
let humanImageMessages: HumanMessage[] = []
if (model instanceof ChatOpenAI) {
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
visionChatModel.setVisionModel()
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
for (const msg of messageContent) {
humanImageMessages.push(new HumanMessage({ content: [msg] }))
}
} else {
// revert to previous values if image upload is empty
visionChatModel.revertToOriginalModel()
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}
const chatPrompt = prepareChatPrompt(nodeData, messageContent)
const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages)
let promptVariables = {}
const promptValuesRaw = (chatPrompt as any).promptValues
if (promptValuesRaw) {
@ -253,12 +223,12 @@ const prepareChain = async (nodeData: INodeData, options: ICommonObject, session
{
[inputKey]: (input: { input: string }) => input.input,
[memoryKey]: async () => {
const history = await memory.getChatMessages(sessionId, true, prependMessages)
const history = await memory.getChatMessages(sessionId, true, chatHistory)
return history
},
...promptVariables
},
prepareChatPrompt(nodeData, messageContent),
prepareChatPrompt(nodeData, humanImageMessages),
model,
new StringOutputParser()
])

View File

@ -1,29 +1,17 @@
import { applyPatch } from 'fast-json-patch'
import { DataSource } from 'typeorm'
import { BaseLanguageModel } from '@langchain/core/language_models/base'
import { BaseRetriever } from '@langchain/core/retrievers'
import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from '@langchain/core/runnables'
import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages'
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { StringOutputParser } from '@langchain/core/output_parsers'
import type { Document } from '@langchain/core/documents'
import { BufferMemoryInput } from 'langchain/memory'
import { ConversationalRetrievalQAChain } from 'langchain/chains'
import { getBaseClasses, mapChatMessageToBaseMessage } from '../../../src/utils'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, additionalCallbacks } from '../../../src/handler'
import {
FlowiseMemory,
ICommonObject,
IMessage,
INode,
INodeData,
INodeParams,
IDatabaseEntity,
MemoryMethods
} from '../../../src/Interface'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
import { QA_TEMPLATE, REPHRASE_TEMPLATE, RESPONSE_TEMPLATE } from './prompts'
type RetrievalChainInput = {
@ -48,7 +36,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Retrieval QA Chain'
this.name = 'conversationalRetrievalQAChain'
this.version = 3.0
this.version = 2.0
this.type = 'ConversationalRetrievalQAChain'
this.icon = 'qa.svg'
this.category = 'Chains'
@ -99,14 +87,6 @@ class ConversationalRetrievalQAChain_Chains implements INode {
additionalParams: true,
optional: true,
default: RESPONSE_TEMPLATE
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
/** Deprecated
{
@ -175,11 +155,6 @@ class ConversationalRetrievalQAChain_Chains implements INode {
const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
const responsePrompt = nodeData.inputs?.responsePrompt as string
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const prependMessages = options?.prependMessages
const appDataSource = options.appDataSource as DataSource
const databaseEntities = options.databaseEntities as IDatabaseEntity
const chatflowid = options.chatflowid as string
let customResponsePrompt = responsePrompt
// If the deprecated systemMessagePrompt is still exists
@ -188,30 +163,17 @@ class ConversationalRetrievalQAChain_Chains implements INode {
}
let memory: FlowiseMemory | undefined = externalMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (!memory) {
memory = new BufferMemory({
returnMessages: true,
memoryKey: 'chat_history',
appDataSource,
databaseEntities,
chatflowid
inputKey: 'input'
})
}
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Conversational Retrieval QA Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
const history = ((await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]) ?? []
const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const additionalCallback = await additionalCallbacks(nodeData, options)
@ -384,67 +346,31 @@ const createChain = (
return conversationalQAChain
}
interface BufferMemoryExtendedInput {
appDataSource: DataSource
databaseEntities: IDatabaseEntity
chatflowid: string
}
class BufferMemory extends FlowiseMemory implements MemoryMethods {
appDataSource: DataSource
databaseEntities: IDatabaseEntity
chatflowid: string
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
constructor(fields: BufferMemoryInput) {
super(fields)
this.appDataSource = fields.appDataSource
this.databaseEntities = fields.databaseEntities
this.chatflowid = fields.chatflowid
}
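// Load the session's messages for this chatflow straight from the database, oldest first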
async getChatMessages(
overrideSessionId = '',
returnBaseMessages = false,
prependMessages?: IMessage[]
): Promise<IMessage[] | BaseMessage[]> {
if (!overrideSessionId) return []
async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise<IMessage[] | BaseMessage[]> {
await this.chatHistory.clear()
const chatMessage = await this.appDataSource.getRepository(this.databaseEntities['ChatMessage']).find({
where: {
sessionId: overrideSessionId,
chatflowid: this.chatflowid
},
order: {
createdDate: 'ASC'
}
})
if (prependMessages?.length) {
chatMessage.unshift(...prependMessages)
for (const msg of prevHistory) {
if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
}
if (returnBaseMessages) {
return mapChatMessageToBaseMessage(chatMessage)
}
let returnIMessages: IMessage[] = []
for (const m of chatMessage) {
returnIMessages.push({
message: m.content as string,
type: m.role
})
}
return returnIMessages
const memoryResult = await this.loadMemoryVariables({})
const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
}
async addChatMessages(): Promise<void> {
// adding chat messages is done on server level
// adding chat messages will be done on the fly in getChatMessages()
return
}
async clearChatMessages(): Promise<void> {
// clearing chat messages is done on server level
return
await this.clear()
}
}

View File

@ -1,15 +1,16 @@
import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
import { HumanMessage } from '@langchain/core/messages'
import { ChatPromptTemplate, FewShotPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
import { OutputFixingParser } from 'langchain/output_parsers'
import { LLMChain } from 'langchain/chains'
import { IVisionChatModal, ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { HumanMessage } from 'langchain/schema'
class LLMChain_Chains implements INode {
label: string
@ -110,9 +111,7 @@ class LLMChain_Chains implements INode {
})
const inputVariables = chain.prompt.inputVariables as string[] // ["product"]
promptValues = injectOutputParser(this.outputParser, chain, promptValues)
// Disable streaming because its not final chain
const disableStreaming = true
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData, disableStreaming)
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
// eslint-disable-next-line no-console
console.log('\x1b[92m\x1b[1m\n*****OUTPUT PREDICTION*****\n\x1b[0m\x1b[0m')
// eslint-disable-next-line no-console
@ -156,16 +155,16 @@ const runPrediction = async (
input: string,
promptValuesRaw: ICommonObject | undefined,
options: ICommonObject,
nodeData: INodeData,
disableStreaming?: boolean
nodeData: INodeData
) => {
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
const isStreaming = !disableStreaming && options.socketIO && options.socketIOClientId
const isStreaming = options.socketIO && options.socketIOClientId
const socketIO = isStreaming ? options.socketIO : undefined
const socketIOClientId = isStreaming ? options.socketIOClientId : ''
const moderations = nodeData.inputs?.inputModeration as Moderation[]
let model = nodeData.inputs?.model as ChatOpenAI
if (moderations && moderations.length > 0) {
try {
@ -184,39 +183,24 @@ const runPrediction = async (
* TO: { "value": "hello i am ben\n\n\thow are you?" }
*/
const promptValues = handleEscapeCharacters(promptValuesRaw, true)
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (llmSupportsVision(chain.llm)) {
const visionChatModel = chain.llm as IVisionChatModal
const messageContent = await addImagesToMessages(nodeData, options, visionChatModel.multiModalOption)
if (chain.llm instanceof ChatOpenAI) {
const chatOpenAI = chain.llm as ChatOpenAI
if (messageContent?.length) {
// Change model to gpt-4-vision && max token to higher when using gpt-4-vision
visionChatModel.setVisionModel()
chatOpenAI.modelName = 'gpt-4-vision-preview'
chatOpenAI.maxTokens = 1024
// Add image to the message
if (chain.prompt instanceof PromptTemplate) {
const existingPromptTemplate = chain.prompt.template as string
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: existingPromptTemplate
}
let newChatPromptTemplate = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(existingPromptTemplate)
])
msg.inputVariables = chain.prompt.inputVariables
chain.prompt = ChatPromptTemplate.fromMessages([msg])
newChatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
chain.prompt = newChatPromptTemplate
} else if (chain.prompt instanceof ChatPromptTemplate) {
if (chain.prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
const lastMessage = chain.prompt.promptMessages.pop() as HumanMessagePromptTemplate
const template = (lastMessage.prompt as PromptTemplate).template as string
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: template
}
])
msg.inputVariables = lastMessage.inputVariables
chain.prompt.promptMessages.push(msg)
} else {
chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
}
chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
} else if (chain.prompt instanceof FewShotPromptTemplate) {
let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string
let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([
@ -228,7 +212,8 @@ const runPrediction = async (
}
} else {
// revert to previous values if image upload is empty
visionChatModel.revertToOriginalModel()
chatOpenAI.modelName = model.configuredModel
chatOpenAI.maxTokens = model.configuredMaxToken
}
}

View File

@ -3,8 +3,6 @@ import { MultiPromptChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams, PromptRetriever } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class MultiPromptChain_Chains implements INode {
label: string
@ -20,7 +18,7 @@ class MultiPromptChain_Chains implements INode {
constructor() {
this.label = 'Multi Prompt Chain'
this.name = 'multiPromptChain'
this.version = 2.0
this.version = 1.0
this.type = 'MultiPromptChain'
this.icon = 'prompt.svg'
this.category = 'Chains'
@ -37,14 +35,6 @@ class MultiPromptChain_Chains implements INode {
name: 'promptRetriever',
type: 'PromptRetriever',
list: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@ -72,19 +62,8 @@ class MultiPromptChain_Chains implements INode {
return chain
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const chain = nodeData.instance as MultiPromptChain
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Multi Prompt Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const obj = { input }
const loggerHandler = new ConsoleCallbackHandler(options.logger)

View File

@ -3,8 +3,6 @@ import { MultiRetrievalQAChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams, VectorStoreRetriever } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class MultiRetrievalQAChain_Chains implements INode {
label: string
@ -20,7 +18,7 @@ class MultiRetrievalQAChain_Chains implements INode {
constructor() {
this.label = 'Multi Retrieval QA Chain'
this.name = 'multiRetrievalQAChain'
this.version = 2.0
this.version = 1.0
this.type = 'MultiRetrievalQAChain'
this.icon = 'qa.svg'
this.category = 'Chains'
@ -43,14 +41,6 @@ class MultiRetrievalQAChain_Chains implements INode {
name: 'returnSourceDocuments',
type: 'boolean',
optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@ -82,17 +72,7 @@ class MultiRetrievalQAChain_Chains implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const chain = nodeData.instance as MultiRetrievalQAChain
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Multi Retrieval QA Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const obj = { input }
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)

View File

@ -4,8 +4,6 @@ import { RetrievalQAChain } from 'langchain/chains'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class RetrievalQAChain_Chains implements INode {
label: string
@ -21,7 +19,7 @@ class RetrievalQAChain_Chains implements INode {
constructor() {
this.label = 'Retrieval QA Chain'
this.name = 'retrievalQAChain'
this.version = 2.0
this.version = 1.0
this.type = 'RetrievalQAChain'
this.icon = 'qa.svg'
this.category = 'Chains'
@ -37,14 +35,6 @@ class RetrievalQAChain_Chains implements INode {
label: 'Vector Store Retriever',
name: 'vectorStoreRetriever',
type: 'BaseRetriever'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@ -57,19 +47,8 @@ class RetrievalQAChain_Chains implements INode {
return chain
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const chain = nodeData.instance as RetrievalQAChain
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Retrieval QA Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const obj = {
query: input
}

View File

@ -7,8 +7,6 @@ import { SqlDatabase } from 'langchain/sql_db'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, getInputVariables } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql'
@ -26,7 +24,7 @@ class SqlDatabaseChain_Chains implements INode {
constructor() {
this.label = 'Sql Database Chain'
this.name = 'sqlDatabaseChain'
this.version = 5.0
this.version = 4.0
this.type = 'SqlDatabaseChain'
this.icon = 'sqlchain.svg'
this.category = 'Chains'
@ -72,7 +70,7 @@ class SqlDatabaseChain_Chains implements INode {
label: 'Include Tables',
name: 'includesTables',
type: 'string',
description: 'Tables to include for queries, separated by comma. Can only use Include Tables or Ignore Tables',
description: 'Tables to include for queries, seperated by comma. Can only use Include Tables or Ignore Tables',
placeholder: 'table1, table2',
additionalParams: true,
optional: true
@ -81,7 +79,7 @@ class SqlDatabaseChain_Chains implements INode {
label: 'Ignore Tables',
name: 'ignoreTables',
type: 'string',
description: 'Tables to ignore for queries, separated by comma. Can only use Ignore Tables or Include Tables',
description: 'Tables to ignore for queries, seperated by comma. Can only use Ignore Tables or Include Tables',
placeholder: 'table1, table2',
additionalParams: true,
optional: true
@ -117,14 +115,6 @@ class SqlDatabaseChain_Chains implements INode {
placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat,
additionalParams: true,
optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@ -154,7 +144,7 @@ class SqlDatabaseChain_Chains implements INode {
return chain
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const databaseType = nodeData.inputs?.database as DatabaseType
const model = nodeData.inputs?.model as BaseLanguageModel
const url = nodeData.inputs?.url as string
@ -165,17 +155,6 @@ class SqlDatabaseChain_Chains implements INode {
const sampleRowsInTableInfo = nodeData.inputs?.sampleRowsInTableInfo as number
const topK = nodeData.inputs?.topK as number
const customPrompt = nodeData.inputs?.customPrompt as string
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Sql Database Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const chain = await getSQLDBChain(
databaseType,
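
The Include Tables / Ignore Tables inputs remain plain comma-separated strings on both branches, so the node presumably splits them before constructing the SqlDatabase. A sketch of that split (the helper name and the trimming of blank entries are assumptions):

// Hypothetical helper: 'table1, table2' -> ['table1', 'table2']
const toTableList = (raw?: string): string[] | undefined =>
    raw
        ? raw
              .split(',')
              .map((t) => t.trim())
              .filter((t) => t.length > 0)
        : undefined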

View File

@ -4,8 +4,6 @@ import { VectaraStore } from '@langchain/community/vectorstores/vectara'
import { VectorDBQAChain } from 'langchain/chains'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
// functionality based on https://github.com/vectara/vectara-answer
const reorderCitations = (unorderedSummary: string) => {
@ -50,7 +48,7 @@ class VectaraChain_Chains implements INode {
constructor() {
this.label = 'Vectara QA Chain'
this.name = 'vectaraQAChain'
this.version = 2.0
this.version = 1.0
this.type = 'VectaraQAChain'
this.icon = 'vectara.png'
this.category = 'Chains'
@ -221,14 +219,6 @@ class VectaraChain_Chains implements INode {
description: 'Maximum results used to build the summarized response',
type: 'number',
default: 7
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@ -237,7 +227,7 @@ class VectaraChain_Chains implements INode {
return null
}
async run(nodeData: INodeData, input: string): Promise<string | object> {
async run(nodeData: INodeData, input: string): Promise<object> {
const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore
const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng'
const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string
@ -262,18 +252,6 @@ class VectaraChain_Chains implements INode {
const mmrRerankerId = 272725718
const mmrEnabled = vectaraFilter?.mmrConfig?.enabled
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Vectara chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const data = {
query: [
{

View File

@ -4,8 +4,6 @@ import { VectorDBQAChain } from 'langchain/chains'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class VectorDBQAChain_Chains implements INode {
label: string
@ -21,7 +19,7 @@ class VectorDBQAChain_Chains implements INode {
constructor() {
this.label = 'VectorDB QA Chain'
this.name = 'vectorDBQAChain'
this.version = 2.0
this.version = 1.0
this.type = 'VectorDBQAChain'
this.icon = 'vectordb.svg'
this.category = 'Chains'
@ -37,14 +35,6 @@ class VectorDBQAChain_Chains implements INode {
label: 'Vector Store',
name: 'vectorStore',
type: 'VectorStore'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
}
]
}
@ -60,20 +50,8 @@ class VectorDBQAChain_Chains implements INode {
return chain
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const chain = nodeData.instance as VectorDBQAChain
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the VectorDB QA Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const obj = {
query: input
}

View File

@ -1,12 +1,15 @@
import { BedrockChat } from '@langchain/community/chat_models/bedrock'
import { BaseCache } from '@langchain/core/caches'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { BedrockChat } from './FlowiseAWSChatBedrock'
import { getModels, getRegions, MODEL_TYPE } from '../../../src/modelLoader'
/**
* I had to run the following to build the component
* and get the icon copied over to the dist directory
* Flowise/packages/components > yarn build
*
* @author Michael Connor <mlconnor@yahoo.com>
*/
class AWSChatBedrock_ChatModels implements INode {
@ -24,7 +27,7 @@ class AWSChatBedrock_ChatModels implements INode {
constructor() {
this.label = 'AWS ChatBedrock'
this.name = 'awsChatBedrock'
this.version = 5.0
this.version = 3.0
this.type = 'AWSChatBedrock'
this.icon = 'aws.svg'
this.category = 'Chat Models'
@ -47,16 +50,57 @@ class AWSChatBedrock_ChatModels implements INode {
{
label: 'Region',
name: 'region',
type: 'asyncOptions',
loadMethod: 'listRegions',
type: 'options',
options: [
{ label: 'af-south-1', name: 'af-south-1' },
{ label: 'ap-east-1', name: 'ap-east-1' },
{ label: 'ap-northeast-1', name: 'ap-northeast-1' },
{ label: 'ap-northeast-2', name: 'ap-northeast-2' },
{ label: 'ap-northeast-3', name: 'ap-northeast-3' },
{ label: 'ap-south-1', name: 'ap-south-1' },
{ label: 'ap-south-2', name: 'ap-south-2' },
{ label: 'ap-southeast-1', name: 'ap-southeast-1' },
{ label: 'ap-southeast-2', name: 'ap-southeast-2' },
{ label: 'ap-southeast-3', name: 'ap-southeast-3' },
{ label: 'ap-southeast-4', name: 'ap-southeast-4' },
{ label: 'ap-southeast-5', name: 'ap-southeast-5' },
{ label: 'ap-southeast-6', name: 'ap-southeast-6' },
{ label: 'ca-central-1', name: 'ca-central-1' },
{ label: 'ca-west-1', name: 'ca-west-1' },
{ label: 'cn-north-1', name: 'cn-north-1' },
{ label: 'cn-northwest-1', name: 'cn-northwest-1' },
{ label: 'eu-central-1', name: 'eu-central-1' },
{ label: 'eu-central-2', name: 'eu-central-2' },
{ label: 'eu-north-1', name: 'eu-north-1' },
{ label: 'eu-south-1', name: 'eu-south-1' },
{ label: 'eu-south-2', name: 'eu-south-2' },
{ label: 'eu-west-1', name: 'eu-west-1' },
{ label: 'eu-west-2', name: 'eu-west-2' },
{ label: 'eu-west-3', name: 'eu-west-3' },
{ label: 'il-central-1', name: 'il-central-1' },
{ label: 'me-central-1', name: 'me-central-1' },
{ label: 'me-south-1', name: 'me-south-1' },
{ label: 'sa-east-1', name: 'sa-east-1' },
{ label: 'us-east-1', name: 'us-east-1' },
{ label: 'us-east-2', name: 'us-east-2' },
{ label: 'us-gov-east-1', name: 'us-gov-east-1' },
{ label: 'us-gov-west-1', name: 'us-gov-west-1' },
{ label: 'us-west-1', name: 'us-west-1' },
{ label: 'us-west-2', name: 'us-west-2' }
],
default: 'us-east-1'
},
{
label: 'Model Name',
name: 'model',
type: 'asyncOptions',
loadMethod: 'listModels',
default: 'anthropic.claude-3-haiku'
type: 'options',
options: [
{ label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
{ label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
{ label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' },
{ label: 'meta.llama2-13b-chat-v1', name: 'meta.llama2-13b-chat-v1' }
],
default: 'anthropic.claude-v2'
},
{
label: 'Custom Model Name',
@ -84,29 +128,10 @@ class AWSChatBedrock_ChatModels implements INode {
optional: true,
additionalParams: true,
default: 200
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
type: 'boolean',
description:
'Only works with claude-3-* models when image is being uploaded from chat. Compatible with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
default: false,
optional: true
}
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'awsChatBedrock')
},
async listRegions(): Promise<INodeOptionsValue[]> {
return await getRegions(MODEL_TYPE.CHAT, 'awsChatBedrock')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const iRegion = nodeData.inputs?.region as string
const iModel = nodeData.inputs?.model as string
@ -145,16 +170,7 @@ class AWSChatBedrock_ChatModels implements INode {
}
if (cache) obj.cache = cache
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: allowImageUploads ?? false
}
}
const amazonBedrock = new BedrockChat(nodeData.id, obj)
if (obj.model.includes('anthropic.claude-3')) amazonBedrock.setMultiModalOption(multiModalOption)
const amazonBedrock = new BedrockChat(obj)
return amazonBedrock
}
}
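
On main this node (and most chat-model nodes below) swaps its hard-coded options arrays for type: 'asyncOptions' plus a loadMethods entry that defers to src/modelLoader. That loader is not part of this compare; a self-contained sketch of the shape it would need, with an illustrative catalogue:

import { INodeOptionsValue } from '../../../src/Interface'

export enum MODEL_TYPE {
    CHAT = 'chat' // other model types omitted in this sketch
}

// Illustrative entries only; the real loader may read a maintained model list instead
const chatModels: Record<string, INodeOptionsValue[]> = {
    awsChatBedrock: [{ label: 'anthropic.claude-3-haiku', name: 'anthropic.claude-3-haiku' }]
}

export async function getModels(type: MODEL_TYPE, nodeName: string): Promise<INodeOptionsValue[]> {
    return type === MODEL_TYPE.CHAT ? chatModels[nodeName] ?? [] : []
}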

View File

@ -1,34 +0,0 @@
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { BedrockChat as LCBedrockChat } from '@langchain/community/chat_models/bedrock'
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
import { IVisionChatModal, IMultiModalOption } from '../../../src'
export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
configuredModel: string
configuredMaxToken?: number
multiModalOption: IMultiModalOption
id: string
constructor(id: string, fields: BaseBedrockInput & BaseChatModelParams) {
super(fields)
this.id = id
this.configuredModel = fields?.model || ''
this.configuredMaxToken = fields?.maxTokens
}
revertToOriginalModel(): void {
super.model = this.configuredModel
super.maxTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
this.multiModalOption = multiModalOption
}
setVisionModel(): void {
if (!this.model.startsWith('claude-3')) {
super.model = 'anthropic.claude-3-haiku-20240307-v1:0'
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
}
}
}
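
This deleted wrapper and its siblings (FlowiseChatAnthropic, FlowiseChatOpenAI, FlowiseChatGoogleGenerativeAI) share the IVisionChatModal contract: remember the configured model, switch to a vision-capable one when images arrive, and switch back afterwards. A sketch of how a caller might drive that contract (the dispatching code sits outside this compare):

import { IVisionChatModal } from '../../../src'

// Hypothetical call site: toggle vision support around a single request
function prepareModelForRequest(model: IVisionChatModal, messageHasImages: boolean): void {
    if (messageHasImages && model.multiModalOption?.image?.allowImageUploads) {
        model.setVisionModel() // e.g. BedrockChat upgrades itself to claude-3-haiku
    } else {
        model.revertToOriginalModel() // restore the user-configured model and max tokens
    }
}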

View File

@ -1,10 +1,8 @@
import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
class AzureChatOpenAI_ChatModels implements INode {
label: string
@ -21,12 +19,12 @@ class AzureChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'Azure ChatOpenAI'
this.name = 'azureChatOpenAI'
this.version = 4.0
this.version = 2.0
this.type = 'AzureChatOpenAI'
this.icon = 'Azure.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint'
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@ -43,8 +41,27 @@ class AzureChatOpenAI_ChatModels implements INode {
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels'
type: 'options',
options: [
{
label: 'gpt-4',
name: 'gpt-4'
},
{
label: 'gpt-4-32k',
name: 'gpt-4-32k'
},
{
label: 'gpt-35-turbo',
name: 'gpt-35-turbo'
},
{
label: 'gpt-35-turbo-16k',
name: 'gpt-35-turbo-16k'
}
],
default: 'gpt-35-turbo',
optional: true
},
{
label: 'Temperature',
@ -62,14 +79,6 @@ class AzureChatOpenAI_ChatModels implements INode {
optional: true,
additionalParams: true
},
{
label: 'Top Probability',
name: 'topP',
type: 'number',
step: 0.1,
optional: true,
additionalParams: true
},
{
label: 'Frequency Penalty',
name: 'frequencyPenalty',
@ -93,49 +102,10 @@ class AzureChatOpenAI_ChatModels implements INode {
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
type: 'boolean',
description:
'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
default: false,
optional: true
},
{
label: 'Image Resolution',
description: 'This parameter controls the resolution in which the model views the image.',
name: 'imageResolution',
type: 'options',
options: [
{
label: 'Low',
name: 'low'
},
{
label: 'High',
name: 'high'
},
{
label: 'Auto',
name: 'auto'
}
],
default: 'low',
optional: false,
additionalParams: true
}
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'azureChatOpenAI')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const modelName = nodeData.inputs?.modelName as string
const temperature = nodeData.inputs?.temperature as string
@ -145,7 +115,6 @@ class AzureChatOpenAI_ChatModels implements INode {
const timeout = nodeData.inputs?.timeout as string
const streaming = nodeData.inputs?.streaming as boolean
const cache = nodeData.inputs?.cache as BaseCache
const topP = nodeData.inputs?.topP as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@ -153,9 +122,6 @@ class AzureChatOpenAI_ChatModels implements INode {
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const imageResolution = nodeData.inputs?.imageResolution as string
const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
temperature: parseFloat(temperature),
modelName,
@ -171,17 +137,8 @@ class AzureChatOpenAI_ChatModels implements INode {
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
if (timeout) obj.timeout = parseInt(timeout, 10)
if (cache) obj.cache = cache
if (topP) obj.topP = parseFloat(topP)
const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: allowImageUploads ?? false,
imageResolution
}
}
const model = new ChatOpenAI(nodeData.id, obj)
model.setMultiModalOption(multiModalOption)
const model = new ChatOpenAI(obj)
return model
}
}
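
Several nodes in this compare build the same IMultiModalOption payload. Its shape, reconstructed from the call sites in this diff rather than from the canonical declaration (imageResolution appears only on the OpenAI-style nodes, so it is presumably optional):

// Reconstruction from usage in this compare, not the canonical interface
interface IMultiModalOptionSketch {
    image?: {
        allowImageUploads?: boolean
        imageResolution?: 'low' | 'high' | 'auto'
    }
}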

View File

@ -1,7 +1,6 @@
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI } from 'llamaindex'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
interface AzureOpenAIConfig {
apiKey?: string
@ -10,28 +9,6 @@ interface AzureOpenAIConfig {
deploymentName?: string
}
const ALL_AZURE_OPENAI_CHAT_MODELS = {
'gpt-35-turbo': { contextWindow: 4096, openAIModel: 'gpt-3.5-turbo' },
'gpt-35-turbo-16k': {
contextWindow: 16384,
openAIModel: 'gpt-3.5-turbo-16k'
},
'gpt-4': { contextWindow: 8192, openAIModel: 'gpt-4' },
'gpt-4-32k': { contextWindow: 32768, openAIModel: 'gpt-4-32k' },
'gpt-4-turbo': {
contextWindow: 128000,
openAIModel: 'gpt-4-turbo'
},
'gpt-4-vision-preview': {
contextWindow: 128000,
openAIModel: 'gpt-4-vision-preview'
},
'gpt-4-1106-preview': {
contextWindow: 128000,
openAIModel: 'gpt-4-1106-preview'
}
}
class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
label: string
name: string
@ -48,7 +25,7 @@ class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
constructor() {
this.label = 'AzureChatOpenAI'
this.name = 'azureChatOpenAI_LlamaIndex'
this.version = 2.0
this.version = 1.0
this.type = 'AzureChatOpenAI'
this.icon = 'Azure.svg'
this.category = 'Chat Models'
@ -65,9 +42,27 @@ class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels',
default: 'gpt-3.5-turbo-16k'
type: 'options',
options: [
{
label: 'gpt-4',
name: 'gpt-4'
},
{
label: 'gpt-4-32k',
name: 'gpt-4-32k'
},
{
label: 'gpt-3.5-turbo',
name: 'gpt-3.5-turbo'
},
{
label: 'gpt-3.5-turbo-16k',
name: 'gpt-3.5-turbo-16k'
}
],
default: 'gpt-3.5-turbo-16k',
optional: true
},
{
label: 'Temperature',
@ -104,15 +99,8 @@ class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'azureChatOpenAI_LlamaIndex')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AZURE_OPENAI_CHAT_MODELS
const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
const temperature = nodeData.inputs?.temperature as string
const maxTokens = nodeData.inputs?.maxTokens as string
const topP = nodeData.inputs?.topP as string

View File

@ -1,10 +1,8 @@
import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { AnthropicInput, ChatAnthropic } from '@langchain/anthropic'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatAnthropic } from './FlowiseChatAnthropic'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
class ChatAnthropic_ChatModels implements INode {
label: string
@ -21,12 +19,12 @@ class ChatAnthropic_ChatModels implements INode {
constructor() {
this.label = 'ChatAnthropic'
this.name = 'chatAnthropic'
this.version = 6.0
this.version = 3.0
this.type = 'ChatAnthropic'
this.icon = 'Anthropic.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around ChatAnthropic large language models that use the Chat endpoint'
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatAnthropic)]
this.baseClasses = [this.type, ...getBaseClasses(ChatAnthropic)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@ -43,9 +41,70 @@ class ChatAnthropic_ChatModels implements INode {
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels',
default: 'claude-3-haiku'
type: 'options',
options: [
{
label: 'claude-2',
name: 'claude-2',
description: 'Claude 2 latest major version, automatically get updates to the model as they are released'
},
{
label: 'claude-2.1',
name: 'claude-2.1',
description: 'Claude 2 latest full version'
},
{
label: 'claude-instant-1',
name: 'claude-instant-1',
description: 'Claude Instant latest major version, automatically get updates to the model as they are released'
},
{
label: 'claude-v1',
name: 'claude-v1'
},
{
label: 'claude-v1-100k',
name: 'claude-v1-100k'
},
{
label: 'claude-v1.0',
name: 'claude-v1.0'
},
{
label: 'claude-v1.2',
name: 'claude-v1.2'
},
{
label: 'claude-v1.3',
name: 'claude-v1.3'
},
{
label: 'claude-v1.3-100k',
name: 'claude-v1.3-100k'
},
{
label: 'claude-instant-v1',
name: 'claude-instant-v1'
},
{
label: 'claude-instant-v1-100k',
name: 'claude-instant-v1-100k'
},
{
label: 'claude-instant-v1.0',
name: 'claude-instant-v1.0'
},
{
label: 'claude-instant-v1.1',
name: 'claude-instant-v1.1'
},
{
label: 'claude-instant-v1.1-100k',
name: 'claude-instant-v1.1-100k'
}
],
default: 'claude-2',
optional: true
},
{
label: 'Temperature',
@ -78,30 +137,14 @@ class ChatAnthropic_ChatModels implements INode {
step: 0.1,
optional: true,
additionalParams: true
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
type: 'boolean',
description:
'Automatically uses claude-3-* models when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
default: false,
optional: true
}
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'chatAnthropic')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokensToSample as string
const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const streaming = nodeData.inputs?.streaming as boolean
@ -110,8 +153,6 @@ class ChatAnthropic_ChatModels implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const obj: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
@ -119,19 +160,12 @@ class ChatAnthropic_ChatModels implements INode {
streaming: streaming ?? true
}
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (maxTokensToSample) obj.maxTokensToSample = parseInt(maxTokensToSample, 10)
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
if (cache) obj.cache = cache
const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: allowImageUploads ?? false
}
}
const model = new ChatAnthropic(nodeData.id, obj)
model.setMultiModalOption(multiModalOption)
const model = new ChatAnthropic(obj)
return model
}
}

View File

@ -1,5 +1,4 @@
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Anthropic } from 'llamaindex'
@ -19,7 +18,7 @@ class ChatAnthropic_LlamaIndex_ChatModels implements INode {
constructor() {
this.label = 'ChatAnthropic'
this.name = 'chatAnthropic_LlamaIndex'
this.version = 3.0
this.version = 1.0
this.type = 'ChatAnthropic'
this.icon = 'Anthropic.svg'
this.category = 'Chat Models'
@ -36,9 +35,21 @@ class ChatAnthropic_LlamaIndex_ChatModels implements INode {
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels',
default: 'claude-3-haiku'
type: 'options',
options: [
{
label: 'claude-2',
name: 'claude-2',
description: 'Claude 2 latest major version, automatically get updates to the model as they are released'
},
{
label: 'claude-instant-1',
name: 'claude-instant-1',
description: 'Claude Instant latest major version, automatically get updates to the model as they are released'
}
],
default: 'claude-2',
optional: true
},
{
label: 'Temperature',
@ -67,16 +78,9 @@ class ChatAnthropic_LlamaIndex_ChatModels implements INode {
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'chatAnthropic_LlamaIndex')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as 'claude-3-opus' | 'claude-3-sonnet' | 'claude-2.1' | 'claude-instant-1.2'
const modelName = nodeData.inputs?.modelName as 'claude-2' | 'claude-instant-1' | undefined
const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
const topP = nodeData.inputs?.topP as string

View File

@ -1,33 +0,0 @@
import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { IVisionChatModal, IMultiModalOption } from '../../../src'
export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal {
configuredModel: string
configuredMaxToken: number
multiModalOption: IMultiModalOption
id: string
constructor(id: string, fields: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string }) {
super(fields)
this.id = id
this.configuredModel = fields?.modelName || ''
this.configuredMaxToken = fields?.maxTokens ?? 2048
}
revertToOriginalModel(): void {
super.modelName = this.configuredModel
super.maxTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
this.multiModalOption = multiModalOption
}
setVisionModel(): void {
if (!this.modelName.startsWith('claude-3')) {
super.modelName = 'claude-3-haiku-20240307'
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 2048
}
}
}

View File

@ -1,80 +0,0 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatBaiduWenxin } from '@langchain/community/chat_models/baiduwenxin'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
class ChatBaiduWenxin_ChatModels implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
credential: INodeParams
inputs: INodeParams[]
constructor() {
this.label = 'ChatBaiduWenxin'
this.name = 'chatBaiduWenxin'
this.version = 1.0
this.type = 'ChatBaiduWenxin'
this.icon = 'baiduwenxin.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around BaiduWenxin Chat Endpoints'
this.baseClasses = [this.type, ...getBaseClasses(ChatBaiduWenxin)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['baiduApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model',
name: 'modelName',
type: 'string',
placeholder: 'ERNIE-Bot-turbo'
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
default: 0.9,
optional: true
}
]
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const cache = nodeData.inputs?.cache as BaseCache
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const baiduApiKey = getCredentialParam('baiduApiKey', credentialData, nodeData)
const baiduSecretKey = getCredentialParam('baiduSecretKey', credentialData, nodeData)
const obj: Partial<ChatBaiduWenxin> = {
streaming: true,
baiduApiKey,
baiduSecretKey,
modelName,
temperature: temperature ? parseFloat(temperature) : undefined
}
if (cache) obj.cache = cache
const model = new ChatBaiduWenxin(obj)
return model
}
}
module.exports = { nodeClass: ChatBaiduWenxin_ChatModels }
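
Like every component file in this compare, the deleted node ends with a module.exports = { nodeClass } registration. A sketch of how a node registry might consume such a file (the actual Flowise loader is not shown here, so the path handling and instantiation are assumptions):

import { INode } from '../../../src/Interface'

// Hypothetical loader: instantiate whatever nodeClass a compiled component exports
function loadNode(compiledModulePath: string): INode {
    // eslint-disable-next-line @typescript-eslint/no-var-requires
    const { nodeClass } = require(compiledModulePath)
    return new nodeClass()
}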

View File

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg xmlns="http://www.w3.org/2000/svg"
aria-label="Baidu" role="img"
viewBox="0 0 512 512"><rect
width="512" height="512"
rx="15%"
fill="#ffffff"/><path d="m131 251c41-9 35-58 34-68-2-17-21-45-48-43-33 3-37 50-37 50-5 22 10 70 51 61m76-82c22 0 40-26 40-58s-18-58-40-58c-23 0-41 26-41 58s18 58 41 58m96 4c31 4 50-28 54-53 4-24-16-52-37-57s-48 29-50 52c-3 27 3 54 33 58m120 41c0-12-10-47-46-47s-41 33-41 57c0 22 2 53 47 52s40-51 40-62m-46 102s-46-36-74-75c-36-57-89-34-106-5-18 29-45 48-49 53-4 4-56 33-44 84 11 52 52 51 52 51s30 3 65-5 65 2 65 2 81 27 104-25c22-53-13-80-13-80" fill="#2319dc"/><path d="m214 266v34h-28s-29 3-39 35c-3 21 4 34 5 36 1 3 10 19 33 23h53v-128zm-1 107h-21s-15-1-19-18c-3-7 0-16 1-20 1-3 6-11 17-14h22zm38-70v68s1 17 24 23h61v-91h-26v68h-25s-8-1-10-7v-61z" fill="#ffffff"/></svg>


View File

@ -1,87 +0,0 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatCohere, ChatCohereInput } from '@langchain/cohere'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
class ChatCohere_ChatModels implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
credential: INodeParams
inputs: INodeParams[]
constructor() {
this.label = 'ChatCohere'
this.name = 'chatCohere'
this.version = 1.0
this.type = 'ChatCohere'
this.icon = 'Cohere.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around Cohere Chat Endpoints'
this.baseClasses = [this.type, ...getBaseClasses(ChatCohere)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['cohereApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels',
default: 'command-r'
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
default: 0.7,
optional: true
}
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'chatCohere')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const modelName = nodeData.inputs?.modelName as string
const cache = nodeData.inputs?.cache as BaseCache
const temperature = nodeData.inputs?.temperature as string
const streaming = nodeData.inputs?.streaming as boolean
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData)
const obj: ChatCohereInput = {
model: modelName,
apiKey: cohereApiKey,
temperature: temperature ? parseFloat(temperature) : undefined,
streaming: streaming ?? true
}
if (cache) obj.cache = cache
const model = new ChatCohere(obj)
return model
}
}
module.exports = { nodeClass: ChatCohere_ChatModels }

View File

@ -1 +0,0 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M11.776 18.304c.64 0 1.92-.032 3.712-.768 2.08-.864 6.176-2.4 9.152-4 2.08-1.12 2.976-2.592 2.976-4.576 0-2.72-2.208-4.96-4.96-4.96h-11.52A7.143 7.143 0 0 0 4 11.136c0 3.936 3.008 7.168 7.776 7.168Z" fill="#39594D"/><path fill-rule="evenodd" clip-rule="evenodd" d="M13.728 23.2c0-1.92 1.152-3.68 2.944-4.416l3.616-1.504C23.968 15.776 28 18.464 28 22.432A5.572 5.572 0 0 1 22.432 28h-3.936c-2.624 0-4.768-2.144-4.768-4.8Z" fill="#D18EE2"/><path d="M8.128 19.232A4.138 4.138 0 0 0 4 23.36v.544C4 26.144 5.856 28 8.128 28a4.138 4.138 0 0 0 4.128-4.128v-.544c-.032-2.24-1.856-4.096-4.128-4.096Z" fill="#FF7759"/></svg>


View File

@ -1,79 +0,0 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatFireworks } from '@langchain/community/chat_models/fireworks'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
class ChatFireworks_ChatModels implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
credential: INodeParams
inputs: INodeParams[]
constructor() {
this.label = 'ChatFireworks'
this.name = 'chatFireworks'
this.version = 1.0
this.type = 'ChatFireworks'
this.icon = 'Fireworks.png'
this.category = 'Chat Models'
this.description = 'Wrapper around Fireworks Chat Endpoints'
this.baseClasses = [this.type, ...getBaseClasses(ChatFireworks)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['fireworksApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model',
name: 'modelName',
type: 'string',
default: 'accounts/fireworks/models/llama-v2-13b-chat',
placeholder: 'accounts/fireworks/models/llama-v2-13b-chat'
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
default: 0.9,
optional: true
}
]
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const cache = nodeData.inputs?.cache as BaseCache
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const fireworksApiKey = getCredentialParam('fireworksApiKey', credentialData, nodeData)
const obj: Partial<ChatFireworks> = {
fireworksApiKey,
model: modelName,
modelName,
temperature: temperature ? parseFloat(temperature) : undefined
}
if (cache) obj.cache = cache
const model = new ChatFireworks(obj)
return model
}
}
module.exports = { nodeClass: ChatFireworks_ChatModels }

Binary file not shown. (Before: 6.6 KiB)

View File

@ -1,10 +1,9 @@
import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai'
import type { SafetySetting } from '@google/generative-ai'
import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from '@langchain/google-genai'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from './FlowiseChatGoogleGenerativeAI'
class GoogleGenerativeAI_ChatModels implements INode {
label: string
@ -21,7 +20,7 @@ class GoogleGenerativeAI_ChatModels implements INode {
constructor() {
this.label = 'ChatGoogleGenerativeAI'
this.name = 'chatGoogleGenerativeAI'
this.version = 2.0
this.version = 1.0
this.type = 'ChatGoogleGenerativeAI'
this.icon = 'GoogleGemini.svg'
this.category = 'Chat Models'
@ -45,8 +44,13 @@ class GoogleGenerativeAI_ChatModels implements INode {
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels',
type: 'options',
options: [
{
label: 'gemini-pro',
name: 'gemini-pro'
}
],
default: 'gemini-pro'
},
{
@ -139,26 +143,10 @@ class GoogleGenerativeAI_ChatModels implements INode {
],
optional: true,
additionalParams: true
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
type: 'boolean',
description:
'Automatically uses vision model when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
default: false,
optional: true
}
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'chatGoogleGenerativeAI')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const apiKey = getCredentialParam('googleGenerativeAPIKey', credentialData, nodeData)
@ -171,21 +159,20 @@ class GoogleGenerativeAI_ChatModels implements INode {
const harmCategory = nodeData.inputs?.harmCategory as string
const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string
const cache = nodeData.inputs?.cache as BaseCache
const streaming = nodeData.inputs?.streaming as boolean
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const obj: Partial<GoogleGenerativeAIChatInput> = {
apiKey: apiKey,
modelName: modelName,
streaming: streaming ?? true
maxOutputTokens: 2048
}
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
if (cache) obj.cache = cache
if (temperature) obj.temperature = parseFloat(temperature)
const model = new ChatGoogleGenerativeAI(obj)
if (topP) model.topP = parseFloat(topP)
if (topK) model.topK = parseFloat(topK)
if (cache) model.cache = cache
if (temperature) model.temperature = parseFloat(temperature)
// Safety Settings
let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory)
@ -198,16 +185,7 @@ class GoogleGenerativeAI_ChatModels implements INode {
threshold: harmBlockThresholds[index] as HarmBlockThreshold
}
})
if (safetySettings.length > 0) obj.safetySettings = safetySettings
const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: allowImageUploads ?? false
}
}
const model = new ChatGoogleGenerativeAI(nodeData.id, obj)
model.setMultiModalOption(multiModalOption)
if (safetySettings.length > 0) model.safetySettings = safetySettings
return model
}
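
The safety-settings block above zips the harmCategory and harmBlockThreshold multi-option inputs by index, relying on the two lists being entered in matching order. The same mapping condensed into one function:

import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai'
import type { SafetySetting } from '@google/generative-ai'

// Pair each selected category with the threshold chosen at the same position
const toSafetySettings = (categories: string[], thresholds: string[]): SafetySetting[] =>
    categories.map((category, index) => ({
        category: category as HarmCategory,
        threshold: thresholds[index] as HarmBlockThreshold
    }))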

View File

@ -1,581 +0,0 @@
import { BaseMessage, AIMessage, AIMessageChunk, isBaseMessage, ChatMessage, MessageContent } from '@langchain/core/messages'
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
import { BaseChatModel, type BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { ChatGeneration, ChatGenerationChunk, ChatResult } from '@langchain/core/outputs'
import { ToolCall } from '@langchain/core/messages/tool'
import { NewTokenIndices } from '@langchain/core/callbacks/base'
import {
EnhancedGenerateContentResponse,
Content,
Part,
Tool,
GenerativeModel,
GoogleGenerativeAI as GenerativeAI
} from '@google/generative-ai'
import type { SafetySetting } from '@google/generative-ai'
import { ICommonObject, IMultiModalOption, IVisionChatModal } from '../../../src'
import { StructuredToolInterface } from '@langchain/core/tools'
import { isStructuredTool } from '@langchain/core/utils/function_calling'
import { zodToJsonSchema } from 'zod-to-json-schema'
interface TokenUsage {
completionTokens?: number
promptTokens?: number
totalTokens?: number
}
export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
modelName?: string
model?: string
temperature?: number
maxOutputTokens?: number
topP?: number
topK?: number
stopSequences?: string[]
safetySettings?: SafetySetting[]
apiKey?: string
streaming?: boolean
}
class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGenerativeAIChatInput {
modelName = 'gemini-pro'
temperature?: number
maxOutputTokens?: number
topP?: number
topK?: number
stopSequences: string[] = []
safetySettings?: SafetySetting[]
apiKey?: string
streaming = false
private client: GenerativeModel
get _isMultimodalModel() {
return this.modelName.includes('vision') || this.modelName.startsWith('gemini-1.5')
}
constructor(fields?: GoogleGenerativeAIChatInput) {
super(fields ?? {})
this.modelName = fields?.model?.replace(/^models\//, '') ?? fields?.modelName?.replace(/^models\//, '') ?? 'gemini-pro'
this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens
if (this.maxOutputTokens && this.maxOutputTokens < 0) {
throw new Error('`maxOutputTokens` must be a positive integer')
}
this.temperature = fields?.temperature ?? this.temperature
if (this.temperature && (this.temperature < 0 || this.temperature > 1)) {
throw new Error('`temperature` must be in the range of [0.0,1.0]')
}
this.topP = fields?.topP ?? this.topP
if (this.topP && this.topP < 0) {
throw new Error('`topP` must be a positive integer')
}
if (this.topP && this.topP > 1) {
throw new Error('`topP` must be below 1.')
}
this.topK = fields?.topK ?? this.topK
if (this.topK && this.topK < 0) {
throw new Error('`topK` must be a positive integer')
}
this.stopSequences = fields?.stopSequences ?? this.stopSequences
this.apiKey = fields?.apiKey ?? process.env['GOOGLE_API_KEY']
if (!this.apiKey) {
throw new Error(
'Please set an API key for Google GenerativeAI ' +
'in the environment variable GOOGLE_API_KEY ' +
'or in the `apiKey` field of the ' +
'ChatGoogleGenerativeAI constructor'
)
}
this.safetySettings = fields?.safetySettings ?? this.safetySettings
if (this.safetySettings && this.safetySettings.length > 0) {
const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category))
if (safetySettingsSet.size !== this.safetySettings.length) {
throw new Error('The categories in `safetySettings` array must be unique')
}
}
this.streaming = fields?.streaming ?? this.streaming
this.getClient()
}
getClient(tools?: Tool[]) {
this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel({
model: this.modelName,
tools,
safetySettings: this.safetySettings as SafetySetting[],
generationConfig: {
candidateCount: 1,
stopSequences: this.stopSequences,
maxOutputTokens: this.maxOutputTokens,
temperature: this.temperature,
topP: this.topP,
topK: this.topK
}
})
}
_combineLLMOutput() {
return []
}
_llmType() {
return 'googlegenerativeai'
}
override bindTools(tools: (StructuredToolInterface | Record<string, unknown>)[], kwargs?: Partial<ICommonObject>) {
//@ts-ignore
return this.bind({ tools: convertToGeminiTools(tools), ...kwargs })
}
convertFunctionResponse(prompts: Content[]) {
for (let i = 0; i < prompts.length; i += 1) {
if (prompts[i].role === 'function') {
if (prompts[i - 1].role === 'model') {
const toolName = prompts[i - 1].parts[0].functionCall?.name ?? ''
prompts[i].parts = [
{
functionResponse: {
name: toolName,
response: {
name: toolName,
content: prompts[i].parts[0].text
}
}
}
]
}
}
}
}
async _generateNonStreaming(
prompt: Content[],
options: this['ParsedCallOptions'],
_runManager?: CallbackManagerForLLMRun
): Promise<ChatResult> {
//@ts-ignore
const tools = options.tools ?? []
this.convertFunctionResponse(prompt)
if (tools.length > 0) {
this.getClient(tools)
} else {
this.getClient()
}
const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
let output
try {
output = await this.client.generateContent({
contents: prompt
})
} catch (e: any) {
if (e.message?.includes('400 Bad Request')) {
e.status = 400
}
throw e
}
return output
})
const generationResult = mapGenerateContentResultToChatResult(res.response)
await _runManager?.handleLLMNewToken(generationResult.generations?.length ? generationResult.generations[0].text : '')
return generationResult
}
async _generate(
messages: BaseMessage[],
options: this['ParsedCallOptions'],
runManager?: CallbackManagerForLLMRun
): Promise<ChatResult> {
let prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
prompt = checkIfEmptyContentAndSameRole(prompt)
// Handle streaming
if (this.streaming) {
const tokenUsage: TokenUsage = {}
const stream = this._streamResponseChunks(messages, options, runManager)
const finalChunks: Record<number, ChatGenerationChunk> = {}
for await (const chunk of stream) {
const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0
if (finalChunks[index] === undefined) {
finalChunks[index] = chunk
} else {
finalChunks[index] = finalChunks[index].concat(chunk)
}
}
const generations = Object.entries(finalChunks)
.sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
.map(([_, value]) => value)
return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }
}
return this._generateNonStreaming(prompt, options, runManager)
}
async *_streamResponseChunks(
messages: BaseMessage[],
options: this['ParsedCallOptions'],
runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
let prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
prompt = checkIfEmptyContentAndSameRole(prompt)
//@ts-ignore
if (options.tools !== undefined && options.tools.length > 0) {
const result = await this._generateNonStreaming(prompt, options, runManager)
const generationMessage = result.generations[0].message as AIMessage
if (generationMessage === undefined) {
throw new Error('Could not parse ChatGoogleGenerativeAI output.')
}
const toolCallChunks = generationMessage.tool_calls?.map((toolCall, i) => ({
name: toolCall.name,
args: JSON.stringify(toolCall.args),
id: toolCall.id,
index: i
}))
yield new ChatGenerationChunk({
message: new AIMessageChunk({
content: generationMessage.content,
additional_kwargs: generationMessage.additional_kwargs,
tool_call_chunks: toolCallChunks
}),
text: generationMessage.tool_calls?.length ? '' : (generationMessage.content as string)
})
} else {
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
this.getClient()
const { stream } = await this.client.generateContentStream({
contents: prompt
})
return stream
})
for await (const response of stream) {
const chunk = convertResponseContentToChatGenerationChunk(response)
if (!chunk) {
continue
}
yield chunk
await runManager?.handleLLMNewToken(chunk.text ?? '')
}
}
}
}
export class ChatGoogleGenerativeAI extends LangchainChatGoogleGenerativeAI implements IVisionChatModal {
configuredModel: string
configuredMaxToken?: number
multiModalOption: IMultiModalOption
id: string
constructor(id: string, fields?: GoogleGenerativeAIChatInput) {
super(fields)
this.id = id
this.configuredModel = fields?.modelName ?? ''
this.configuredMaxToken = fields?.maxOutputTokens
}
revertToOriginalModel(): void {
super.modelName = this.configuredModel
super.maxOutputTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
this.multiModalOption = multiModalOption
}
setVisionModel(): void {
if (this.modelName !== 'gemini-pro-vision' && this.modelName !== 'gemini-1.5-pro-latest') {
super.modelName = 'gemini-1.5-pro-latest'
super.maxOutputTokens = this.configuredMaxToken ? this.configuredMaxToken : 8192
}
}
}
function getMessageAuthor(message: BaseMessage) {
const type = message._getType()
if (ChatMessage.isInstance(message)) {
return message.role
}
return message.name ?? type
}
function convertAuthorToRole(author: string) {
switch (author) {
/**
* Note: Gemini currently does not support system messages;
* we convert them to human messages and merge them with the following one
* */
case 'ai':
case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
return 'model'
case 'system':
case 'human':
return 'user'
case 'function':
case 'tool':
return 'function'
default:
// Instead of throwing, we return model
// throw new Error(`Unknown / unsupported author: ${author}`)
return 'model'
}
}
function convertMessageContentToParts(content: MessageContent, isMultimodalModel: boolean): Part[] {
if (typeof content === 'string') {
return [{ text: content }]
}
return content.map((c) => {
if (c.type === 'text') {
return {
text: c.text
}
}
if (c.type === 'tool_use') {
return {
functionCall: c.functionCall
}
}
/*if (c.type === "tool_use" || c.type === "tool_result") {
// TODO: Fix when SDK types are fixed
return {
...contentPart,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any;
}*/
if (c.type === 'image_url') {
if (!isMultimodalModel) {
throw new Error(`This model does not support images`)
}
let source
if (typeof c.image_url === 'string') {
source = c.image_url
} else if (typeof c.image_url === 'object' && 'url' in c.image_url) {
source = c.image_url.url
} else {
throw new Error('Please provide image as base64 encoded data URL')
}
const [dm, data] = source.split(',')
if (!dm.startsWith('data:')) {
throw new Error('Please provide image as base64 encoded data URL')
}
const [mimeType, encoding] = dm.replace(/^data:/, '').split(';')
if (encoding !== 'base64') {
throw new Error('Please provide image as base64 encoded data URL')
}
return {
inlineData: {
data,
mimeType
}
}
}
throw new Error(`Unknown content type ${(c as { type: string }).type}`)
})
}
/*
* This is a dedicated logic for Multi Agent Supervisor to handle the case where the content is empty, and the role is the same
*/
function checkIfEmptyContentAndSameRole(contents: Content[]) {
let prevRole = ''
const removedContents: Content[] = []
for (const content of contents) {
const role = content.role
if (content.parts.length && content.parts[0].text === '' && role === prevRole) {
removedContents.push(content)
}
prevRole = role
}
return contents.filter((content) => !removedContents.includes(content))
}
function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) {
return messages.reduce<{
content: Content[]
mergeWithPreviousContent: boolean
}>(
(acc, message, index) => {
if (!isBaseMessage(message)) {
throw new Error('Unsupported message input')
}
const author = getMessageAuthor(message)
if (author === 'system' && index !== 0) {
throw new Error('System message should be the first one')
}
const role = convertAuthorToRole(author)
const prevContent = acc.content[acc.content.length - 1]
if (!acc.mergeWithPreviousContent && prevContent && prevContent.role === role) {
throw new Error('Google Generative AI requires alternate messages between authors')
}
const parts = convertMessageContentToParts(message.content, isMultimodalModel)
if (acc.mergeWithPreviousContent) {
const prevContent = acc.content[acc.content.length - 1]
if (!prevContent) {
throw new Error('There was a problem parsing your system message. Please try a prompt without one.')
}
prevContent.parts.push(...parts)
return {
mergeWithPreviousContent: false,
content: acc.content
}
}
const content: Content = {
role,
parts
}
return {
mergeWithPreviousContent: author === 'system',
content: [...acc.content, content]
}
},
{ content: [], mergeWithPreviousContent: false }
).content
}
function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse): ChatResult {
// if rejected or error, return empty generations with reason in filters
if (!response.candidates || response.candidates.length === 0 || !response.candidates[0]) {
return {
generations: [],
llmOutput: {
filters: response?.promptFeedback
}
}
}
const [candidate] = response.candidates
const { content, ...generationInfo } = candidate
const text = content.parts.map(({ text }) => text).join('')
if (content.parts.some((part) => part.functionCall)) {
const toolCalls: ToolCall[] = []
for (const fcPart of content.parts) {
const fc = fcPart.functionCall
if (fc) {
const { name, args } = fc
toolCalls.push({ name, args })
}
}
const functionCalls = toolCalls.map((tool) => {
return { functionCall: { name: tool.name, args: tool.args }, type: 'tool_use' }
})
const generation: ChatGeneration = {
text,
message: new AIMessage({
content: functionCalls,
name: !content ? undefined : content.role,
additional_kwargs: generationInfo,
tool_calls: toolCalls
}),
generationInfo
}
return {
generations: [generation]
}
} else {
const generation: ChatGeneration = {
text,
message: new AIMessage({
content: text,
name: !content ? undefined : content.role,
additional_kwargs: generationInfo
}),
generationInfo
}
return {
generations: [generation]
}
}
}
function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse): ChatGenerationChunk | null {
if (!response.candidates || response.candidates.length === 0) {
return null
}
const [candidate] = response.candidates
const { content, ...generationInfo } = candidate
const text = content?.parts[0]?.text ?? ''
return new ChatGenerationChunk({
text,
message: new AIMessageChunk({
content: text,
name: !content ? undefined : content.role,
// Each chunk can have unique "generationInfo", and merging strategy is unclear,
// so leave blank for now.
additional_kwargs: {}
}),
generationInfo
})
}
function zodToGeminiParameters(zodObj: any) {
// Gemini doesn't accept either the $schema or additionalProperties
// attributes, so we need to explicitly remove them.
const jsonSchema: any = zodToJsonSchema(zodObj)
// eslint-disable-next-line unused-imports/no-unused-vars
const { $schema, additionalProperties, ...rest } = jsonSchema
if (rest.properties) {
Object.keys(rest.properties).forEach((key) => {
if (rest.properties[key].enum?.length) {
rest.properties[key] = { type: 'string', format: 'enum', enum: rest.properties[key].enum }
}
})
}
return rest
}
function convertToGeminiTools(structuredTools: (StructuredToolInterface | Record<string, unknown>)[]) {
return [
{
functionDeclarations: structuredTools.map((structuredTool) => {
if (isStructuredTool(structuredTool)) {
const jsonSchema = zodToGeminiParameters(structuredTool.schema)
return {
name: structuredTool.name,
description: structuredTool.description,
parameters: jsonSchema
}
}
return structuredTool
})
}
]
}
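
zodToGeminiParameters above strips the $schema and additionalProperties attributes that Gemini rejects and rewrites enum properties into Gemini's format. A usage sketch against the function as listed (the schema itself is illustrative):

import { z } from 'zod'

const getWeatherSchema = z.object({
    city: z.string().describe('City to look up'),
    unit: z.enum(['celsius', 'fahrenheit'])
})

// Returns the JSON schema minus $schema/additionalProperties, with `unit`
// rewritten as { type: 'string', format: 'enum', enum: ['celsius', 'fahrenheit'] }
const parameters = zodToGeminiParameters(getWeatherSchema)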

View File

@ -1,8 +1,7 @@
import { ChatGooglePaLM, GooglePaLMChatInput } from '@langchain/community/chat_models/googlepalm'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
class ChatGooglePaLM_ChatModels implements INode {
label: string
@ -19,7 +18,7 @@ class ChatGooglePaLM_ChatModels implements INode {
constructor() {
this.label = 'ChatGooglePaLM'
this.name = 'chatGooglePaLM'
this.version = 3.0
this.version = 2.0
this.type = 'ChatGooglePaLM'
this.icon = 'GooglePaLM.svg'
this.category = 'Chat Models'
@ -41,9 +40,15 @@ class ChatGooglePaLM_ChatModels implements INode {
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels',
default: 'models/chat-bison-001'
type: 'options',
options: [
{
label: 'models/chat-bison-001',
name: 'models/chat-bison-001'
}
],
default: 'models/chat-bison-001',
optional: true
},
{
label: 'Temperature',
@ -93,13 +98,6 @@ class ChatGooglePaLM_ChatModels implements INode {
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'chatGooglePaLM')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const modelName = nodeData.inputs?.modelName as string
const temperature = nodeData.inputs?.temperature as string
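For context, the `asyncOptions`/`loadMethods` pairing shown above fills the model dropdown at runtime instead of hard-coding choices; `listModels` resolves to `INodeOptionsValue[]` entries shaped roughly as in this sketch (values illustrative, not from the source):
const options: INodeOptionsValue[] = [{ label: 'models/chat-bison-001', name: 'models/chat-bison-001' }]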

View File

@ -1,8 +1,8 @@
import { GoogleAuthOptions } from 'google-auth-library'
import { BaseCache } from '@langchain/core/caches'
import { ChatVertexAI, ChatVertexAIInput } from '@langchain/google-vertexai'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { ChatGoogleVertexAI, GoogleVertexAIChatInput } from '@langchain/community/chat_models/googlevertexai'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
class GoogleVertexAI_ChatModels implements INode {
label: string
@ -19,12 +19,12 @@ class GoogleVertexAI_ChatModels implements INode {
constructor() {
this.label = 'ChatGoogleVertexAI'
this.name = 'chatGoogleVertexAI'
this.version = 4.0
this.version = 2.0
this.type = 'ChatGoogleVertexAI'
this.icon = 'GoogleVertex.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around VertexAI large language models that use the Chat endpoint'
this.baseClasses = [this.type, ...getBaseClasses(ChatVertexAI)]
this.baseClasses = [this.type, ...getBaseClasses(ChatGoogleVertexAI)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@ -44,9 +44,27 @@ class GoogleVertexAI_ChatModels implements INode {
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels',
default: 'chat-bison'
type: 'options',
options: [
{
label: 'chat-bison',
name: 'chat-bison'
},
{
label: 'codechat-bison',
name: 'codechat-bison'
},
{
label: 'chat-bison-32k',
name: 'chat-bison-32k'
},
{
label: 'codechat-bison-32k',
name: 'codechat-bison-32k'
}
],
default: 'chat-bison',
optional: true
},
{
label: 'Temperature',
@ -71,33 +89,17 @@ class GoogleVertexAI_ChatModels implements INode {
step: 0.1,
optional: true,
additionalParams: true
},
{
label: 'Top Next Highest Probability Tokens',
name: 'topK',
type: 'number',
description: `Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive`,
step: 1,
optional: true,
additionalParams: true
}
]
}
//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'chatGoogleVertexAI')
}
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const googleApplicationCredentialFilePath = getCredentialParam('googleApplicationCredentialFilePath', credentialData, nodeData)
const googleApplicationCredential = getCredentialParam('googleApplicationCredential', credentialData, nodeData)
const projectID = getCredentialParam('projectID', credentialData, nodeData)
const authOptions: ICommonObject = {}
const authOptions: GoogleAuthOptions = {}
if (Object.keys(credentialData).length !== 0) {
if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
throw new Error('Please specify your Google Application Credential')
@ -119,9 +121,8 @@ class GoogleVertexAI_ChatModels implements INode {
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
const cache = nodeData.inputs?.cache as BaseCache
const topK = nodeData.inputs?.topK as string
const obj: ChatVertexAIInput = {
const obj: GoogleVertexAIChatInput<GoogleAuthOptions> = {
temperature: parseFloat(temperature),
model: modelName
}
@ -130,9 +131,8 @@ class GoogleVertexAI_ChatModels implements INode {
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (cache) obj.cache = cache
if (topK) obj.topK = parseFloat(topK)
const model = new ChatVertexAI(obj)
const model = new ChatGoogleVertexAI(obj)
return model
}
}

View File

@ -18,7 +18,7 @@ class ChatHuggingFace_ChatModels implements INode {
constructor() {
this.label = 'ChatHuggingFace'
this.name = 'chatHuggingFace'
this.version = 3.0
this.version = 2.0
this.type = 'ChatHuggingFace'
this.icon = 'HuggingFace.svg'
this.category = 'Chat Models'
@ -42,7 +42,8 @@ class ChatHuggingFace_ChatModels implements INode {
name: 'model',
type: 'string',
description: 'If using own inference endpoint, leave this blank',
placeholder: 'gpt2'
placeholder: 'gpt2',
optional: true
},
{
label: 'Endpoint',
@ -96,16 +97,6 @@ class ChatHuggingFace_ChatModels implements INode {
description: 'Frequency Penalty parameter may not apply to certain models. Please check available model parameters',
optional: true,
additionalParams: true
},
{
label: 'Stop Sequence',
name: 'stop',
type: 'string',
rows: 4,
placeholder: 'AI assistant:',
description: 'Sets the stop sequences to use. Use comma to separate different sequences.',
optional: true,
additionalParams: true
}
]
}
@ -119,7 +110,6 @@ class ChatHuggingFace_ChatModels implements INode {
const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
const endpoint = nodeData.inputs?.endpoint as string
const cache = nodeData.inputs?.cache as BaseCache
const stop = nodeData.inputs?.stop as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)
@ -134,11 +124,7 @@ class ChatHuggingFace_ChatModels implements INode {
if (topP) obj.topP = parseFloat(topP)
if (hfTopK) obj.topK = parseFloat(hfTopK)
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
if (endpoint) obj.endpointUrl = endpoint
if (stop) {
const stopSequences = stop.split(',')
obj.stopSequences = stopSequences
}
if (endpoint) obj.endpoint = endpoint
const huggingFace = new HuggingFaceInference(obj)
if (cache) huggingFace.cache = cache
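
For reference, the `Stop Sequence` input shown above is split on commas before being handed to the model; a minimal sketch of that handling (values illustrative):
const stop = 'AI assistant:,Human:'
const stopSequences = stop.split(',') // ['AI assistant:', 'Human:']
// assigned to obj.stopSequences, which the HuggingFace call forwards as its `stop` parameter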

View File

@ -1,19 +1,32 @@
import { LLM, BaseLLMParams } from '@langchain/core/language_models/llms'
import { getEnvironmentVariable } from '../../../src/utils'
import { GenerationChunk } from '@langchain/core/outputs'
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
export interface HFInput {
/** Model to use */
model: string
/** Sampling temperature to use */
temperature?: number
/**
* Maximum number of tokens to generate in the completion.
*/
maxTokens?: number
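/** Stop sequences that halt generation when produced */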
stopSequences?: string[]
/** Total probability mass of tokens to consider at each step */
topP?: number
/** Integer to define the top tokens considered within the sample operation to create new text. */
topK?: number
/** Penalizes repeated tokens according to frequency */
frequencyPenalty?: number
/** API key to use. */
apiKey?: string
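/** Custom inference endpoint URL; when set, requests are routed there instead of the hosted model */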
endpointUrl?: string
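/** Whether (or which) credentials to include with fetch requests, passed through to HfInference */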
includeCredentials?: string | boolean
/** Private endpoint to use. */
endpoint?: string
}
export class HuggingFaceInference extends LLM implements HFInput {
@ -27,8 +40,6 @@ export class HuggingFaceInference extends LLM implements HFInput {
temperature: number | undefined = undefined
stopSequences: string[] | undefined = undefined
maxTokens: number | undefined = undefined
topP: number | undefined = undefined
@ -39,9 +50,7 @@ export class HuggingFaceInference extends LLM implements HFInput {
apiKey: string | undefined = undefined
endpointUrl: string | undefined = undefined
includeCredentials: string | boolean | undefined = undefined
endpoint: string | undefined = undefined
constructor(fields?: Partial<HFInput> & BaseLLMParams) {
super(fields ?? {})
@ -49,13 +58,11 @@ export class HuggingFaceInference extends LLM implements HFInput {
this.model = fields?.model ?? this.model
this.temperature = fields?.temperature ?? this.temperature
this.maxTokens = fields?.maxTokens ?? this.maxTokens
this.stopSequences = fields?.stopSequences ?? this.stopSequences
this.topP = fields?.topP ?? this.topP
this.topK = fields?.topK ?? this.topK
this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty
this.endpoint = fields?.endpoint ?? ''
this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
this.endpointUrl = fields?.endpointUrl
this.includeCredentials = fields?.includeCredentials
if (!this.apiKey) {
throw new Error(
'Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.'
@ -67,65 +74,31 @@ export class HuggingFaceInference extends LLM implements HFInput {
return 'hf'
}
invocationParams(options?: this['ParsedCallOptions']) {
return {
model: this.model,
/** @ignore */
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
const { HfInference } = await HuggingFaceInference.imports()
const hf = new HfInference(this.apiKey)
const obj: any = {
parameters: {
// make it behave similarly to OpenAI, returning only the generated text
return_full_text: false,
temperature: this.temperature,
max_new_tokens: this.maxTokens,
stop: options?.stop ?? this.stopSequences,
top_p: this.topP,
top_k: this.topK,
repetition_penalty: this.frequencyPenalty
}
},
inputs: prompt
}
}
async *_streamResponseChunks(
prompt: string,
options: this['ParsedCallOptions'],
runManager?: CallbackManagerForLLMRun
): AsyncGenerator<GenerationChunk> {
const hfi = await this._prepareHFInference()
const stream = await this.caller.call(async () =>
hfi.textGenerationStream({
...this.invocationParams(options),
inputs: prompt
})
)
for await (const chunk of stream) {
const token = chunk.token.text
yield new GenerationChunk({ text: token, generationInfo: chunk })
await runManager?.handleLLMNewToken(token ?? '')
// stream is done
if (chunk.generated_text)
yield new GenerationChunk({
text: '',
generationInfo: { finished: true }
})
if (this.endpoint) {
hf.endpoint(this.endpoint)
} else {
obj.model = this.model
}
}
/** @ignore */
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
const hfi = await this._prepareHFInference()
const args = { ...this.invocationParams(options), inputs: prompt }
const res = await this.caller.callWithOptions({ signal: options.signal }, hfi.textGeneration.bind(hfi), args)
const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), obj)
return res.generated_text
}
/** @ignore */
private async _prepareHFInference() {
const { HfInference } = await HuggingFaceInference.imports()
const hfi = new HfInference(this.apiKey, {
includeCredentials: this.includeCredentials
})
return this.endpointUrl ? hfi.endpoint(this.endpointUrl) : hfi
}
/** @ignore */
static async imports(): Promise<{
HfInference: typeof import('@huggingface/inference').HfInference
@ -134,7 +107,7 @@ export class HuggingFaceInference extends LLM implements HFInput {
const { HfInference } = await import('@huggingface/inference')
return { HfInference }
} catch (e) {
throw new Error('Please install @huggingface/inference as a dependency, e.g. `pnpm install @huggingface/inference`')
throw new Error('Please install @huggingface/inference as a dependency, e.g. `yarn add @huggingface/inference`')
}
}
}
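
A minimal usage sketch of the class above (model name and prompt are illustrative; assumes `HUGGINGFACEHUB_API_KEY` is set, since the constructor throws without an API key):
const llm = new HuggingFaceInference({
    model: 'gpt2',
    temperature: 0.7,
    maxTokens: 50,
    stopSequences: ['\n\n'] // forwarded to the HF `stop` parameter via invocationParams
})
const completion = await llm.invoke('The sky is') // returns only the generated text (return_full_text: false)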

Some files were not shown because too many files have changed in this diff.