Compare commits
287 Commits
flowise-co ... main
| Author | SHA1 | Date |
|---|---|---|
| | 54ff43e8f1 | |
| | 95b2cf7b7f | |
| | 074bb738a3 | |
| | 78e60e22d2 | |
| | 9e88c45051 | |
| | 363d1bfc44 | |
| | 9ea439d135 | |
| | 1015e1193f | |
| | 7166317482 | |
| | 3cbbd59242 | |
| | 90558ca688 | |
| | b1e38783e4 | |
| | dfdeb02b3a | |
| | cacbfa8162 | |
| | 656f6cad81 | |
| | efc6e02828 | |
| | 512df4197c | |
| | 4d174495dc | |
| | 15a416a58f | |
| | e69fee1375 | |
| | cc24f94358 | |
| | b55f87cc40 | |
| | 7067f90153 | |
| | d0354bb25c | |
| | 96dfedde6e | |
| | 1367f095d4 | |
| | 109b0367cc | |
| | e2ae524edd | |
| | eff1336b82 | |
| | 18b83333d3 | |
| | 0fc5e3d0c5 | |
| | aec9e7a3b7 | |
| | 83ecc88b35 | |
| | f811fc4e5d | |
| | d4f80394d3 | |
| | 842bfc66fe | |
| | 72e5287343 | |
| | 8bb841641e | |
| | b662dd79c6 | |
| | 1849637af8 | |
| | 21743656a8 | |
| | b5ead0745b | |
| | 371e632a2c | |
| | c34eb8ee15 | |
| | f2c6a1988f | |
| | 76c5e6a893 | |
| | 3ab0d99711 | |
| | 5ba468b4cc | |
| | 5e4d640ed7 | |
| | 6fb775fe95 | |
| | 88ee9b09a7 | |
| | 9f9aff34f8 | |
| | 66e1296a06 | |
| | f1e78d870e | |
| | be3a887e68 | |
| | c8939dc2a6 | |
| | 34251fa336 | |
| | cb93d9d557 | |
| | 5899e50c54 | |
| | e0a03ad46d | |
| | 582dcc8508 | |
| | 5a73eaa588 | |
| | 4ec8376efa | |
| | 5ba9493b30 | |
| | bdbb6f850a | |
| | 55f52c4d50 | |
| | 7eb9341fdc | |
| | a799ac8087 | |
| | 76abd20e85 | |
| | 272fd914bd | |
| | f2a0ffe542 | |
| | 8c66d2c735 | |
| | e15e6fafdc | |
| | e5f0ca2dd3 | |
| | 1d9927027d | |
| | c42ef95a15 | |
| | f64931bfcc | |
| | 6a58ae4e80 | |
| | 04e0ce1783 | |
| | b5b929e192 | |
| | 7706b3484a | |
| | d50563765e | |
| | eb738a1552 | |
| | 059eae4268 | |
| | d734747ec0 | |
| | 912c8f3d5b | |
| | 48ac815f8e | |
| | 2878af69e4 | |
| | 5d649b27cf | |
| | f5b08864b8 | |
| | 97386bc3b2 | |
| | 22f39692e5 | |
| | 82899d9d5d | |
| | 50c53de296 | |
| | e32b643445 | |
| | 265de4e97e | |
| | ff2381741e | |
| | e83dcb01b8 | |
| | 9d10dc4856 | |
| | 68625c0589 | |
| | 8ebc4dcfd5 | |
| | 95f1090bed | |
| | 5733a8089e | |
| | 8caca472ba | |
| | 816436f8fa | |
| | 0521e6b3f9 | |
| | 0365afbeeb | |
| | 0de7fb8509 | |
| | b5e502f3b6 | |
| | c022972cf8 | |
| | b65487564a | |
| | 49c07552ce | |
| | b4829275aa | |
| | b3069932e1 | |
| | b50103021c | |
| | 4fbc3f6cfe | |
| | d3f03e380e | |
| | 823cefb5c5 | |
| | ee9d3a33fa | |
| | cb0eb67df0 | |
| | 32ad3b1366 | |
| | b952350a7b | |
| | 38ce851200 | |
| | 1ee6f1f88a | |
| | dce84106ef | |
| | e851af90b1 | |
| | 96d4ab66f2 | |
| | 2048976545 | |
| | a9f9c8874c | |
| | 26e7a1ac35 | |
| | 43b22476e3 | |
| | d4a5474f48 | |
| | ef532866fd | |
| | a84eabbef2 | |
| | d34cef2dc7 | |
| | 40718bd77a | |
| | a6bcaba592 | |
| | 80f24ac30c | |
| | 40e36d1b39 | |
| | af4e28aa91 | |
| | 713a1e815d | |
| | 09569d0b06 | |
| | f9195b6a68 | |
| | 5a137a478c | |
| | 9971627821 | |
| | 2254d16c3a | |
| | c5e06bce6d | |
| | d5a97060e2 | |
| | e71266de87 | |
| | db452cd74d | |
| | adea2f0830 | |
| | 2b1273ca31 | |
| | c4eb75ddde | |
| | 5775947586 | |
| | b7eb876b39 | |
| | 198fffe331 | |
| | a295573f82 | |
| | 51058b2a31 | |
| | fa3d21bc30 | |
| | 723837b30f | |
| | 6899b27229 | |
| | 546eafe6a1 | |
| | 7360d1d9a6 | |
| | 4782c0f6fc | |
| | f378dcc332 | |
| | 8d549f87b5 | |
| | 728af22cc4 | |
| | 7006d64de0 | |
| | 6ab259b6aa | |
| | 4c2ba109fd | |
| | 95beaba9d9 | |
| | f5be889ea8 | |
| | f4c7887e50 | |
| | b7e4fc9517 | |
| | 6bd8aaefc8 | |
| | d1c8f7eb96 | |
| | e4ab2a9e33 | |
| | 713077381b | |
| | 2cadd68a43 | |
| | 1ccd3c7170 | |
| | d07bd96c7b | |
| | 5b0941e7d3 | |
| | fec087c54d | |
| | a71785f0e2 | |
| | b34094035d | |
| | 1130620d40 | |
| | 0b3da598dd | |
| | 788d40f26b | |
| | 4daf29db80 | |
| | a82dd93c6c | |
| | e7a58fc700 | |
| | c33642cdf9 | |
| | 57b716c7d7 | |
| | d7194e8aaa | |
| | 024b2ad22e | |
| | d96459d87b | |
| | 1996cc40ba | |
| | e630123f63 | |
| | eabc84ee9f | |
| | 0511ea1f56 | |
| | 20a500efb5 | |
| | 1129782758 | |
| | 057e056257 | |
| | 827de07e94 | |
| | 19bb23440a | |
| | 19e14c4798 | |
| | 658fa3984e | |
| | eed7de6df5 | |
| | 39198a42ad | |
| | 39f7e4c263 | |
| | 87e30399d4 | |
| | e422ce287b | |
| | 957694a912 | |
| | ea255db15d | |
| | b9b0c9d227 | |
| | dad30472b6 | |
| | 70706a7183 | |
| | a09b7f7e39 | |
| | aa6aa2e461 | |
| | a57cd76757 | |
| | f116dba84e | |
| | 794818b434 | |
| | 6fd0fe60fc | |
| | b177644354 | |
| | a702e7408c | |
| | 3a7c2fd4db | |
| | 414b9f125c | |
| | 4ca82ee733 | |
| | 7e84049990 | |
| | 509b2dd36d | |
| | 813f622f6d | |
| | 0267005225 | |
| | dee681b63e | |
| | 0c0308b9c4 | |
| | 57efa25fe5 | |
| | ff09ae6a50 | |
| | bf5be755f6 | |
| | c11c43cf0d | |
| | ec1bbc84bc | |
| | ac02cde2fa | |
| | 187d306653 | |
| | 0ba6548163 | |
| | 50a2e911f2 | |
| | bb7373ee62 | |
| | cd4c659009 | |
| | 58122e985c | |
| | 0c649c9ce3 | |
| | 01559d4da3 | |
| | 3fbfd3d425 | |
| | 536da36d48 | |
| | 39e380eac2 | |
| | d437a6fb11 | |
| | 3ff5a5ee05 | |
| | 5d14d0af1b | |
| | 134ecb8b2a | |
| | fa081acea0 | |
| | 49846cd66a | |
| | 1459190adc | |
| | 0ebfa68b93 | |
| | b20a46a03d | |
| | bc32759d96 | |
| | f98509226d | |
| | 534d6e4bbf | |
| | 5f69a0652e | |
| | b98c15d832 | |
| | 7c4056e305 | |
| | ad3d5032a5 | |
| | 02963ce0d5 | |
| | c4cc13c9c7 | |
| | 56b610cfa2 | |
| | eb2a83fda7 | |
| | 381a4553bd | |
| | a4a2fbb08f | |
| | 4a6e71058c | |
| | e95a780b26 | |
| | c2ecb48900 | |
| | 3096a0fa50 | |
| | dc59b0468f | |
| | 4a7da99996 | |
| | ae78ea6b43 | |
| | 6d4fa7b368 | |
| | 7020974b55 | |
| | b7bb043d3a | |
| | 811a6a0f41 | |
| | 00bc63296b | |
| | afff39e334 | |
| | 54c59024c5 | |
@@ -5,3 +5,6 @@ build
 **/node_modules
 **/build
 **/dist
+
+packages/server/.env
+packages/ui/.env
@@ -1,2 +0,0 @@
node_modules
dist
@@ -11,14 +11,14 @@ jobs:
 permissions:
     contents: write
 steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
     - name: Show PR info
      env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      run: |
          echo The PR #${{ github.event.pull_request.number }} was merged on main branch!
     - name: Repository Dispatch
-      uses: peter-evans/repository-dispatch@v2
+      uses: peter-evans/repository-dispatch@v3
      with:
          token: ${{ secrets.AUTOSYNC_TOKEN }}
          repository: ${{ secrets.AUTOSYNC_CH_URL }}
@@ -28,6 +28,6 @@ jobs:
 "ref": "${{ github.ref }}",
 "prNumber": "${{ github.event.pull_request.number }}",
 "prTitle": "${{ github.event.pull_request.title }}",
-"prDescription": "${{ github.event.pull_request.description }}",
+"prDescription": "",
 "sha": "${{ github.sha }}"
 }
@@ -23,7 +23,7 @@ jobs:
 run: |
     echo Autosync a single commit with id: ${{ github.sha }} from openSource main branch towards cloud hosted version.
 - name: Repository Dispatch
-  uses: peter-evans/repository-dispatch@v2
+  uses: peter-evans/repository-dispatch@v3
   with:
      token: ${{ secrets.AUTOSYNC_TOKEN }}
      repository: ${{ secrets.AUTOSYNC_CH_URL }}
@@ -32,5 +32,5 @@ jobs:
 {
     "ref": "${{ github.ref }}",
     "sha": "${{ github.sha }}",
-    "commitMessage": "${{ github.event.commits[0].message }}"
+    "commitMessage": "${{ github.event.commits[0].id }}"
 }
@@ -0,0 +1,43 @@
name: Docker Image CI

on:
    workflow_dispatch:
        inputs:
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout
              uses: actions/checkout@v4.1.1
            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0
            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0
            - name: Login to Docker Hub
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}
            - name: Build and push
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: ./docker/Dockerfile
                  build-args: |
                      NODE_VERSION=${{github.event.inputs.node_version}}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: flowiseai/flowise:${{github.event.inputs.tag_version}}
@@ -1,17 +1,13 @@
 name: Node CI
 
 on:
-    push:
-        branches:
-            - main
-
     pull_request:
         branches:
             - '*'
 
 permissions:
     contents: read
 
 jobs:
     build:
         strategy:
@@ -22,16 +18,32 @@ jobs:
 env:
     PUPPETEER_SKIP_DOWNLOAD: true
 steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
+    - uses: pnpm/action-setup@v3
+      with:
+          version: 9.0.4
     - name: Use Node.js ${{ matrix.node-version }}
-      uses: actions/setup-node@v3
+      uses: actions/setup-node@v4
      with:
          node-version: ${{ matrix.node-version }}
+         check-latest: false
+         cache: 'pnpm'
-    - run: npm i -g pnpm
     - run: pnpm install
-    - run: ./node_modules/.bin/cypress install
     - run: pnpm lint
     - run: pnpm build
+    - name: Install dependencies
+      uses: cypress-io/github-action@v6
+      with:
+          working-directory: ./
+          runTests: false
+    - name: Cypress test
+      uses: cypress-io/github-action@v6
+      with:
+          install: false
+          working-directory: packages/server
+          start: pnpm start
+          wait-on: 'http://localhost:3000'
+          wait-on-timeout: 120
+          browser: chrome
@@ -11,6 +11,9 @@
 **/logs
 **/*.log
+
+## pnpm
+.pnpm-store/
 
 ## build
 **/dist
 **/build
@@ -44,3 +47,60 @@
 
 ## compressed
 **/*.tgz
+
+## vscode
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+!.vscode/*.code-snippets
+
+# Local History for Visual Studio Code
+.history/
+
+## other keys
+*.key
+*.keys
+*.priv
+*.rsa
+*.key.json
+
+## ssh keys
+*.ssh
+*.ssh-key
+.key-mrc
+
+## Certificate Authority
+*.ca
+
+## Certificate
+*.crt
+
+## Certificate Sign Request
+*.csr
+
+## Certificate
+*.der
+
+## Key database file
+*.kdb
+
+## OSCP request data
+*.org
+
+## PKCS #12
+*.p12
+
+## PEM-encoded certificate data
+*.pem
+
+## Random number seed
+*.rnd
+
+## SSLeay data
+*.ssleay
+
+## S/MIME message
+*.smime
+*.vsix
@@ -1,3 +0,0 @@
**/node_modules
**/dist
**/build
@@ -1,9 +0,0 @@
module.exports = {
    printWidth: 140,
    singleQuote: true,
    jsxSingleQuote: true,
    trailingComma: 'none',
    tabWidth: 4,
    semi: false,
    endOfLine: 'auto'
}
@@ -1,6 +1,6 @@
 # Contributor Covenant Code of Conduct
 
-English | [中文](<./CODE_OF_CONDUCT-ZH.md>)
+English | [中文](./i18n/CODE_OF_CONDUCT-ZH.md)
 
 ## Our Pledge
 
@@ -1,159 +0,0 @@
<!-- markdownlint-disable MD030 -->

# 贡献给 Flowise

[English](./CONTRIBUTING.md) | 中文

我们欢迎任何形式的贡献。

## ⭐ 点赞

点赞并分享[Github 仓库](https://github.com/FlowiseAI/Flowise)。

## 🙋 问题和回答

在[问题和回答](https://github.com/FlowiseAI/Flowise/discussions/categories/q-a)部分搜索任何问题,如果找不到,可以毫不犹豫地创建一个。这可能会帮助到其他有类似问题的人。

## 🙌 分享 Chatflow

是的!分享你如何使用 Flowise 是一种贡献方式。将你的 Chatflow 导出为 JSON,附上截图并在[展示和分享](https://github.com/FlowiseAI/Flowise/discussions/categories/show-and-tell)部分分享。

## 💡 想法

欢迎各种想法,如新功能、应用集成和区块链网络。在[想法](https://github.com/FlowiseAI/Flowise/discussions/categories/ideas)部分提交。

## 🐞 报告错误

发现问题了吗?[报告它](https://github.com/FlowiseAI/Flowise/issues/new/choose)。

## 👨‍💻 贡献代码

不确定要贡献什么?一些想法:

- 从 `packages/components` 创建新组件
- 更新现有组件,如扩展功能、修复错误
- 添加新的 Chatflow 想法

### 开发人员

Flowise 在一个单一的单体存储库中有 3 个不同的模块。

- `server`:用于提供 API 逻辑的 Node 后端
- `ui`:React 前端
- `components`:Langchain/LlamaIndex 组件

#### 先决条件

- 安装 [PNPM](https://pnpm.io/installation)
    ```bash
    npm i -g pnpm
    ```

#### 逐步指南

1. Fork 官方的[Flowise Github 仓库](https://github.com/FlowiseAI/Flowise)。

2. 克隆你 fork 的存储库。

3. 创建一个新的分支,参考[指南](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository)。命名约定:

    - 对于功能分支:`feature/<你的新功能>`
    - 对于 bug 修复分支:`bugfix/<你的新bug修复>`。

4. 切换到新创建的分支。

5. 进入存储库文件夹

    ```bash
    cd Flowise
    ```

6. 安装所有模块的依赖项:

    ```bash
    pnpm install
    ```

7. 构建所有代码:

    ```bash
    pnpm build
    ```

8. 在[http://localhost:3000](http://localhost:3000)上启动应用程序

    ```bash
    pnpm start
    ```

9. 开发时:

    - 在`packages/ui`中创建`.env`文件并指定`VITE_PORT`(参考`.env.example`)
    - 在`packages/server`中创建`.env`文件并指定`PORT`(参考`.env.example`)
    - 运行

        ```bash
        pnpm dev
        ```

    对`packages/ui`或`packages/server`进行的任何更改都将反映在[http://localhost:8080](http://localhost:8080)上

    对于`packages/components`中进行的更改,再次运行`pnpm build`以应用更改。

10. 做完所有的更改后,运行以下命令来确保在生产环境中一切正常:

    ```bash
    pnpm build
    ```

    和

    ```bash
    pnpm start
    ```

11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/master) 的分叉分支上提交 Pull Request。

## 🌱 环境变量

Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。阅读[更多信息](https://docs.flowiseai.com/environment-variables)

| 变量名 | 描述 | 类型 | 默认值 |
| --- | --- | --- | --- |
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
| FLOWISE_USERNAME | 登录用户名 | 字符串 | |
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb |
| DEBUG | 打印组件的日志 | 布尔值 | |
| BLOB_STORAGE_PATH | 存储位置 | 字符串 | `your-home-dir/.flowise/storage` |
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
| APIKEY_PATH | 存储 API 密钥的位置 | 字符串 | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
| TOOL_FUNCTION_EXTERNAL_DEP | 用于工具函数的外部模块 | 字符串 | |
| DATABASE_TYPE | 存储 flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | 数据库保存的位置(当 DATABASE_TYPE 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
| DATABASE_HOST | 主机 URL 或 IP 地址(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_PORT | 数据库端口(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_USERNAME | 数据库用户名(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_PASSWORD | 数据库密码(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |

您也可以在使用 `npx` 时指定环境变量。例如:

```
npx flowise start --PORT=3000 --DEBUG=true
```

## 📖 贡献文档

[Flowise 文档](https://github.com/FlowiseAI/FlowiseDocs)

## 🏷️ Pull Request 流程

当您打开一个 Pull Request 时,FlowiseAI 团队的成员将自动收到通知/指派。您也可以在 [Discord](https://discord.gg/jbaHfsRVBW) 上联系我们。

##
@@ -2,7 +2,7 @@
 
 # Contributing to Flowise
 
-English | [中文](./CONTRIBUTING-ZH.md)
+English | [中文](./i18n/CONTRIBUTING-ZH.md)
 
 We appreciate any form of contributions.
 
@@ -44,7 +44,7 @@ Flowise has 3 different modules in a single mono repository.
 
 #### Prerequisite
 
-- Install [PNPM](https://pnpm.io/installation)
+- Install [PNPM](https://pnpm.io/installation). The project is configured to use pnpm v9.
     ```bash
     npm i -g pnpm
     ```
@@ -120,33 +120,41 @@ Flowise has 3 different modules in a single mono repository.
 
 Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)
 
-| Variable | Description | Type | Default |
-| --- | --- | --- | --- |
-| PORT | The HTTP port Flowise runs on | Number | 3000 |
-| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
-| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
-| FLOWISE_USERNAME | Username to login | String | |
-| FLOWISE_PASSWORD | Password to login | String | |
-| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
-| DEBUG | Print logs from components | Boolean | |
-| BLOB_STORAGE_PATH | Location where uploaded files are stored | String | `your-home-dir/.flowise/storage` |
-| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
-| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
-| APIKEY_PATH | Location where api keys are saved | String | `your-path/Flowise/packages/server` |
-| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
-| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
-| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
-| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
-| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
-| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
-| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
-| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String |
-| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean |
+| Variable | Description | Type | Default |
+| --- | --- | --- | --- |
+| PORT | The HTTP port Flowise runs on | Number | 3000 |
+| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
+| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
+| FLOWISE_USERNAME | Username to login | String | |
+| FLOWISE_PASSWORD | Password to login | String | |
+| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
+| DISABLE_CHATFLOW_REUSE | Forces the creation of a new ChatFlow for each call instead of reusing existing ones from cache | Boolean | |
+| DEBUG | Print logs from components | Boolean | |
+| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
+| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
+| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
+| APIKEY_PATH | Location where api keys are saved | String | `your-path/Flowise/packages/server` |
+| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
+| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
+| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
+| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
+| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
+| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
+| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
+| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String |
+| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean |
+| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
+| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local` | `local` |
+| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
+| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
+| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
+| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
+| S3_STORAGE_REGION | Region for S3 bucket | String | |
 
 You can also specify the env variables when using `npx`. For example:
 
@@ -4,7 +4,7 @@
 # Run image
 # docker run -d -p 3000:3000 flowise
 
-FROM node:18-alpine
+FROM node:20-alpine
 RUN apk add --update libc6-compat python3 make g++
 # needed for pdfjs-dist
 RUN apk add --no-cache build-base cairo-dev pango-dev
@@ -10,7 +10,7 @@
 [](https://star-history.com/#FlowiseAI/Flowise)
 [](https://github.com/FlowiseAI/Flowise/fork)
 
-English | [中文](./README-ZH.md)
+English | [中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)
 
 <h3>Drag & drop UI to build your customized LLM flow</h3>
 <a href="https://github.com/FlowiseAI/Flowise">
@@ -44,9 +44,9 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
 
 1. Go to `docker` folder at the root of the project
 2. Copy `.env.example` file, paste it into the same location, and rename to `.env`
-3. `docker-compose up -d`
+3. `docker compose up -d`
 4. Open [http://localhost:3000](http://localhost:3000)
-5. You can bring the containers down by `docker-compose stop`
+5. You can bring the containers down by `docker compose stop`
 
 ### Docker Image
 
@@ -1,13 +0,0 @@
module.exports = {
    presets: [
        '@babel/preset-typescript',
        [
            '@babel/preset-env',
            {
                targets: {
                    node: 'current'
                }
            }
        ]
    ]
}
@@ -5,17 +5,16 @@ SECRETKEY_PATH=/root/.flowise
 LOG_PATH=/root/.flowise/logs
 BLOB_STORAGE_PATH=/root/.flowise/storage
 
-# CORS_ORIGINS="*"
-# IFRAME_ORIGINS="*"
+# NUMBER_OF_PROXIES= 1
+# CORS_ORIGINS=*
+# IFRAME_ORIGINS=*
 
 # DATABASE_TYPE=postgres
-# DATABASE_PORT=""
+# DATABASE_PORT=5432
 # DATABASE_HOST=""
-# DATABASE_NAME="flowise"
-# DATABASE_USER=""
-# DATABASE_PASSWORD=""
+# DATABASE_NAME=flowise
+# DATABASE_USER=root
+# DATABASE_PASSWORD=mypassword
+# DATABASE_SSL=true
+# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>
 
@@ -23,8 +22,11 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
 # FLOWISE_PASSWORD=1234
 # FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
 # FLOWISE_FILE_SIZE_LIMIT=50mb
 
+# DISABLE_CHATFLOW_REUSE=true
+
 # DEBUG=true
-# LOG_LEVEL=debug (error | warn | info | verbose | debug)
+# LOG_LEVEL=info (error | warn | info | verbose | debug)
 # TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
 # TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
 
@@ -33,4 +35,15 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
 # LANGCHAIN_API_KEY=your_api_key
 # LANGCHAIN_PROJECT=your_project
 
 # DISABLE_FLOWISE_TELEMETRY=true
+
+# Uncomment the following line to enable model list config, load the list of models from your local config file
+# see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format
+# MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path
+
+# STORAGE_TYPE=local (local | s3)
+# BLOB_STORAGE_PATH=/your_storage_path/.flowise/storage
+# S3_STORAGE_BUCKET_NAME=flowise
+# S3_STORAGE_ACCESS_KEY_ID=<your-access-key>
+# S3_STORAGE_SECRET_ACCESS_KEY=<your-secret-key>
+# S3_STORAGE_REGION=us-west-2
@@ -1,21 +1,25 @@
-FROM node:18-alpine
+# Stage 1: Build stage
+FROM node:20-alpine as build
 
 USER root
 
 RUN apk add --no-cache git
 RUN apk add --no-cache python3 py3-pip make g++
 # needed for pdfjs-dist
 RUN apk add --no-cache build-base cairo-dev pango-dev
 
 # Install Chromium
 RUN apk add --no-cache chromium
 
+# Skip downloading Chrome for Puppeteer (saves build time)
 ENV PUPPETEER_SKIP_DOWNLOAD=true
 ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
 
-# You can install a specific version like: flowise@1.0.0
+# Install latest Flowise globally (specific version can be set: flowise@1.0.0)
 RUN npm install -g flowise
 
-WORKDIR /data
+# Stage 2: Runtime stage
+FROM node:20-alpine
 
-CMD "flowise"
+# Install runtime dependencies
+RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev
+
+# Set the environment variable for Puppeteer to find Chromium
+ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
+
+# Copy Flowise from the build stage
+COPY --from=build /usr/local/lib/node_modules /usr/local/lib/node_modules
+COPY --from=build /usr/local/bin /usr/local/bin
+
+ENTRYPOINT ["flowise", "start"]
@@ -5,9 +5,9 @@ Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise
 ## Usage
 
 1. Create `.env` file and specify the `PORT` (refer to `.env.example`)
-2. `docker-compose up -d`
+2. `docker compose up -d`
 3. Open [http://localhost:3000](http://localhost:3000)
-4. You can bring the containers down by `docker-compose stop`
+4. You can bring the containers down by `docker compose stop`
 
 ## 🔒 Authentication
 
@@ -19,9 +19,9 @@ Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise
     - FLOWISE_USERNAME=${FLOWISE_USERNAME}
     - FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
 ```
-3. `docker-compose up -d`
+3. `docker compose up -d`
 4. Open [http://localhost:3000](http://localhost:3000)
-5. You can bring the containers down by `docker-compose stop`
+5. You can bring the containers down by `docker compose stop`
 
 ## 🌱 Env Variables
 
@@ -28,8 +28,9 @@ services:
     - LOG_PATH=${LOG_PATH}
     - BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
     - DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
+    - MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
 ports:
     - '${PORT}:${PORT}'
 volumes:
     - ~/.flowise:/root/.flowise
-command: /bin/sh -c "sleep 3; flowise start"
+entrypoint: /bin/sh -c "sleep 3; flowise start"
@@ -2,7 +2,7 @@
 
 # 贡献者公约行为准则
 
-[English](<./CODE_OF_CONDUCT.md>) | 中文
+[English](../CODE_OF_CONDUCT.md) | 中文
 
 ## 我们的承诺
 
@@ -44,6 +44,6 @@
 
 ## 归属
 
-该行为准则的内容来自于[贡献者公约](http://contributor-covenant.org/)1.4版,可在[http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4)上获取。
+该行为准则的内容来自于[贡献者公约](http://contributor-covenant.org/)1.4 版,可在[http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4)上获取。
 
 [主页]: http://contributor-covenant.org
@@ -0,0 +1,166 @@
<!-- markdownlint-disable MD030 -->

# 贡献给 Flowise

[English](../CONTRIBUTING.md) | 中文

我们欢迎任何形式的贡献。

## ⭐ 点赞

点赞并分享[Github 仓库](https://github.com/FlowiseAI/Flowise)。

## 🙋 问题和回答

在[问题和回答](https://github.com/FlowiseAI/Flowise/discussions/categories/q-a)部分搜索任何问题,如果找不到,可以毫不犹豫地创建一个。这可能会帮助到其他有类似问题的人。

## 🙌 分享 Chatflow

是的!分享你如何使用 Flowise 是一种贡献方式。将你的 Chatflow 导出为 JSON,附上截图并在[展示和分享](https://github.com/FlowiseAI/Flowise/discussions/categories/show-and-tell)部分分享。

## 💡 想法

欢迎各种想法,如新功能、应用集成和区块链网络。在[想法](https://github.com/FlowiseAI/Flowise/discussions/categories/ideas)部分提交。

## 🐞 报告错误

发现问题了吗?[报告它](https://github.com/FlowiseAI/Flowise/issues/new/choose)。

## 👨‍💻 贡献代码

不确定要贡献什么?一些想法:

- 从 `packages/components` 创建新组件
- 更新现有组件,如扩展功能、修复错误
- 添加新的 Chatflow 想法

### 开发人员

Flowise 在一个单一的单体存储库中有 3 个不同的模块。

- `server`:用于提供 API 逻辑的 Node 后端
- `ui`:React 前端
- `components`:Langchain/LlamaIndex 组件

#### 先决条件

- 安装 [PNPM](https://pnpm.io/installation)
    ```bash
    npm i -g pnpm
    ```

#### 逐步指南

1. Fork 官方的[Flowise Github 仓库](https://github.com/FlowiseAI/Flowise)。

2. 克隆你 fork 的存储库。

3. 创建一个新的分支,参考[指南](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository)。命名约定:

    - 对于功能分支:`feature/<你的新功能>`
    - 对于 bug 修复分支:`bugfix/<你的新bug修复>`。

4. 切换到新创建的分支。

5. 进入存储库文件夹

    ```bash
    cd Flowise
    ```

6. 安装所有模块的依赖项:

    ```bash
    pnpm install
    ```

7. 构建所有代码:

    ```bash
    pnpm build
    ```

8. 在[http://localhost:3000](http://localhost:3000)上启动应用程序

    ```bash
    pnpm start
    ```

9. 开发时:

    - 在`packages/ui`中创建`.env`文件并指定`VITE_PORT`(参考`.env.example`)
    - 在`packages/server`中创建`.env`文件并指定`PORT`(参考`.env.example`)
    - 运行

        ```bash
        pnpm dev
        ```

    对`packages/ui`或`packages/server`进行的任何更改都将反映在[http://localhost:8080](http://localhost:8080)上

    对于`packages/components`中进行的更改,再次运行`pnpm build`以应用更改。

10. 做完所有的更改后,运行以下命令来确保在生产环境中一切正常:

    ```bash
    pnpm build
    ```

    和

    ```bash
    pnpm start
    ```

11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/master) 的分叉分支上提交 Pull Request。

## 🌱 环境变量

Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。阅读[更多信息](https://docs.flowiseai.com/environment-variables)

| 变量名 | 描述 | 类型 | 默认值 |
| --- | --- | --- | --- |
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
| FLOWISE_USERNAME | 登录用户名 | 字符串 | |
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb |
| DISABLE_CHATFLOW_REUSE | 强制为每次调用创建一个新的 ChatFlow,而不是重用缓存中的现有 ChatFlow | 布尔值 | |
| DEBUG | 打印组件的日志 | 布尔值 | |
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
| APIKEY_PATH | 存储 API 密钥的位置 | 字符串 | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
| TOOL_FUNCTION_EXTERNAL_DEP | 用于工具函数的外部模块 | 字符串 | |
| DATABASE_TYPE | 存储 flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | 数据库保存的位置(当 DATABASE_TYPE 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
| DATABASE_HOST | 主机 URL 或 IP 地址(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_PORT | 数据库端口(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_USERNAME | 数据库用户名(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_PASSWORD | 数据库密码(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |
| MODEL_LIST_CONFIG_JSON | 加载模型的位置 | 字符 | `/your_model_list_config_file_path` |
| STORAGE_TYPE | 上传文件的存储类型 | 枚举字符串: `local`, `s3` | `local` |
| BLOB_STORAGE_PATH | 上传文件存储的本地文件夹路径, 当`STORAGE_TYPE`是`local` | 字符串 | `your-home-dir/.flowise/storage` |
| S3_STORAGE_BUCKET_NAME | S3 存储文件夹路径, 当`STORAGE_TYPE`是`s3` | 字符串 | |
| S3_STORAGE_ACCESS_KEY_ID | AWS 访问密钥 (Access Key) | 字符串 | |
| S3_STORAGE_SECRET_ACCESS_KEY | AWS 密钥 (Secret Key) | 字符串 | |
| S3_STORAGE_REGION | S3 存储地区 | 字符串 | |

您也可以在使用 `npx` 时指定环境变量。例如:

```
npx flowise start --PORT=3000 --DEBUG=true
```

## 📖 贡献文档

[Flowise 文档](https://github.com/FlowiseAI/FlowiseDocs)

## 🏷️ Pull Request 流程

当您打开一个 Pull Request 时,FlowiseAI 团队的成员将自动收到通知/指派。您也可以在 [Discord](https://discord.gg/jbaHfsRVBW) 上联系我们。

##
@@ -0,0 +1,204 @@
<!-- markdownlint-disable MD030 -->

<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>

# Flowise - LLM アプリを簡単に構築

[](https://github.com/FlowiseAI/Flowise/releases)
[](https://discord.gg/jbaHfsRVBW)
[](https://twitter.com/FlowiseAI)
[](https://star-history.com/#FlowiseAI/Flowise)
[](https://github.com/FlowiseAI/Flowise/fork)

[English](../README.md) | [中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md)

<h3>ドラッグ&ドロップでカスタマイズした LLM フローを構築できる UI</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>

## ⚡ クイックスタート

[NodeJS](https://nodejs.org/en/download) >= 18.15.0 をダウンロードしてインストール

1. Flowise のインストール
    ```bash
    npm install -g flowise
    ```
2. Flowise の実行

    ```bash
    npx flowise start
    ```

    ユーザー名とパスワードを入力

    ```bash
    npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
    ```

3. [http://localhost:3000](http://localhost:3000) を開く

## 🐳 Docker

### Docker Compose

1. プロジェクトのルートにある `docker` フォルダに移動する
2. `.env.example` ファイルをコピーして同じ場所に貼り付け、名前を `.env` に変更する
3. `docker compose up -d`
4. [http://localhost:3000](http://localhost:3000) を開く
5. コンテナを停止するには、`docker compose stop` を使用します

### Docker Image

1. ローカルにイメージを構築する:
    ```bash
    docker build --no-cache -t flowise .
    ```
2. image を実行:

    ```bash
    docker run -d --name flowise -p 3000:3000 flowise
    ```

3. image を停止:
    ```bash
    docker stop flowise
    ```

## 👨‍💻 開発者向け

Flowise には、3 つの異なるモジュールが 1 つの mono リポジトリにあります。

- `server`: API ロジックを提供する Node バックエンド
- `ui`: React フロントエンド
- `components`: サードパーティノードとの統合

### 必須条件

- [PNPM](https://pnpm.io/installation) をインストール
    ```bash
    npm i -g pnpm
    ```

### セットアップ

1. リポジトリをクローン

    ```bash
    git clone https://github.com/FlowiseAI/Flowise.git
    ```

2. リポジトリフォルダに移動

    ```bash
    cd Flowise
    ```

3. すべてのモジュールの依存関係をインストール:

    ```bash
    pnpm install
    ```

4. すべてのコードをビルド:

    ```bash
    pnpm build
    ```

5. アプリを起動:

    ```bash
    pnpm start
    ```

    [http://localhost:3000](http://localhost:3000) でアプリにアクセスできるようになりました

6. 開発用ビルド:

    - `.env` ファイルを作成し、`packages/ui` に `VITE_PORT` を指定する(`.env.example` を参照)
    - `.env` ファイルを作成し、`packages/server` に `PORT` を指定する(`.env.example` を参照)
    - 実行

        ```bash
        pnpm dev
        ```

    コードの変更は [http://localhost:8080](http://localhost:8080) に自動的にアプリをリロードします

## 🔒 認証

アプリレベルの認証を有効にするには、 `FLOWISE_USERNAME` と `FLOWISE_PASSWORD` を `packages/server` の `.env` ファイルに追加します:

```
FLOWISE_USERNAME=user
FLOWISE_PASSWORD=1234
```

## 🌱 環境変数

Flowise は、インスタンスを設定するためのさまざまな環境変数をサポートしています。`packages/server` フォルダ内の `.env` ファイルで以下の変数を指定することができる。[続き](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)を読む

## 📖 ドキュメント

[Flowise ドキュメント](https://docs.flowiseai.com/)

## 🌐 セルフホスト

お客様の既存インフラに Flowise をセルフホストでデプロイ、様々な[デプロイ](https://docs.flowiseai.com/configuration/deployment)をサポートします

- [AWS](https://docs.flowiseai.com/deployment/aws)
- [Azure](https://docs.flowiseai.com/deployment/azure)
- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
- [GCP](https://docs.flowiseai.com/deployment/gcp)
- <details>
  <summary>その他</summary>

    - [Railway](https://docs.flowiseai.com/deployment/railway)

      [](https://railway.app/template/pn4G8S?referralCode=WVNPD9)

    - [Render](https://docs.flowiseai.com/deployment/render)

      [](https://docs.flowiseai.com/deployment/render)

    - [Hugging Face Spaces](https://docs.flowiseai.com/deployment/hugging-face)

      <a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="Hugging Face Spaces"></a>

    - [Elestio](https://elest.io/open-source/flowiseai)

      [](https://elest.io/open-source/flowiseai)

    - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)

      [](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)

    - [RepoCloud](https://repocloud.io/details/?app_id=29)

      [](https://repocloud.io/details/?app_id=29)

  </details>

## 💻 クラウドホスト

近日公開

## 🙋 サポート

ご質問、問題提起、新機能のご要望は、[discussion](https://github.com/FlowiseAI/Flowise/discussions)までお気軽にどうぞ

## 🙌 コントリビュート

これらの素晴らしい貢献者に感謝します

<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
</a>

[コントリビューティングガイド](CONTRIBUTING.md)を参照してください。質問や問題があれば、[Discord](https://discord.gg/jbaHfsRVBW) までご連絡ください。
[](https://star-history.com/#FlowiseAI/Flowise&Date)

## 📄 ライセンス

このリポジトリのソースコードは、[Apache License Version 2.0](LICENSE.md)の下で利用可能です。
@@ -0,0 +1,204 @@
<!-- markdownlint-disable MD030 -->

<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>

# Flowise - 간편한 LLM 애플리케이션 제작

[](https://github.com/FlowiseAI/Flowise/releases)
[](https://discord.gg/jbaHfsRVBW)
[](https://twitter.com/FlowiseAI)
[](https://star-history.com/#FlowiseAI/Flowise)
[](https://github.com/FlowiseAI/Flowise/fork)

[English](../README.md) | [中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어

<h3>드래그 앤 드롭 UI로 맞춤형 LLM 플로우 구축하기</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>

## ⚡빠른 시작 가이드

18.15.0 버전 이상의 [NodeJS](https://nodejs.org/en/download) 다운로드 및 설치

1. Flowise 설치
    ```bash
    npm install -g flowise
    ```
2. Flowise 시작하기

    ```bash
    npx flowise start
    ```

    사용자 이름과 비밀번호로 시작하기

    ```bash
    npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
    ```

3. [http://localhost:3000](http://localhost:3000) URL 열기

## 🐳 도커(Docker)를 활용하여 시작하기

### 도커 컴포즈 활용

1. 프로젝트의 최상위(root) 디렉토리에 있는 `docker` 폴더로 이동하세요.
2. `.env.example` 파일을 복사한 후, 같은 경로에 붙여넣기 한 다음, `.env`로 이름을 변경합니다.
3. `docker compose up -d` 실행
4. [http://localhost:3000](http://localhost:3000) URL 열기
5. `docker compose stop` 명령어를 통해 컨테이너를 종료시킬 수 있습니다.

### 도커 이미지 활용

1. 로컬에서 이미지 빌드하기:
    ```bash
    docker build --no-cache -t flowise .
    ```
2. 이미지 실행하기:

    ```bash
    docker run -d --name flowise -p 3000:3000 flowise
    ```

3. 이미지 종료하기:
    ```bash
    docker stop flowise
    ```

## 👨‍💻 개발자들을 위한 가이드

Flowise는 단일 리포지토리에 3개의 서로 다른 모듈이 있습니다.

- `server`: API 로직을 제공하는 노드 백엔드
- `ui`: 리액트 프론트엔드
- `components`: 서드파티 노드 통합을 위한 컴포넌트

### 사전 설치 요건

- [PNPM](https://pnpm.io/installation) 설치하기
    ```bash
    npm i -g pnpm
    ```

### 설치 및 설정

1. 리포지토리 복제

    ```bash
    git clone https://github.com/FlowiseAI/Flowise.git
    ```

2. 리포지토리 폴더로 이동

    ```bash
    cd Flowise
    ```

3. 모든 모듈의 종속성 설치:

    ```bash
    pnpm install
    ```

4. 모든 코드 빌드하기:

    ```bash
    pnpm build
    ```

5. 애플리케이션 시작:

    ```bash
    pnpm start
    ```

    이제 [http://localhost:3000](http://localhost:3000)에서 애플리케이션에 접속할 수 있습니다.

6. 개발 환경에서 빌드할 경우:

    - `packages/ui`경로에 `.env` 파일을 생성하고 `VITE_PORT`(`.env.example` 참조)를 지정합니다.
    - `packages/server`경로에 `.env` 파일을 생성하고 `PORT`(`.env.example` 참조)를 지정합니다.
    - 실행하기

        ```bash
        pnpm dev
        ```

    코드가 변경되면 [http://localhost:8080](http://localhost:8080)에서 자동으로 애플리케이션을 새로고침 합니다.

## 🔒 인증

애플리케이션 수준의 인증을 사용하려면 `packages/server`의 `.env` 파일에 `FLOWISE_USERNAME` 및 `FLOWISE_PASSWORD`를 추가합니다:

```
FLOWISE_USERNAME=user
FLOWISE_PASSWORD=1234
```

## 🌱 환경 변수

Flowise는 인스턴스 구성을 위한 다양한 환경 변수를 지원합니다. `packages/server` 폴더 내 `.env` 파일에 다양한 환경 변수를 지정할 수 있습니다. [자세히 보기](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)

## 📖 공식 문서

[Flowise 문서](https://docs.flowiseai.com/)

## 🌐 자체 호스팅 하기

기존 인프라 환경에서 Flowise를 자체 호스팅으로 배포하세요. 다양한 배포 [deployments](https://docs.flowiseai.com/configuration/deployment) 방법을 지원합니다.

- [AWS](https://docs.flowiseai.com/deployment/aws)
- [Azure](https://docs.flowiseai.com/deployment/azure)
- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
- [GCP](https://docs.flowiseai.com/deployment/gcp)
- <details>
  <summary>그 외</summary>

    - [Railway](https://docs.flowiseai.com/deployment/railway)

      [](https://railway.app/template/pn4G8S?referralCode=WVNPD9)

    - [Render](https://docs.flowiseai.com/deployment/render)

      [](https://docs.flowiseai.com/deployment/render)

    - [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)

      <a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>

    - [Elestio](https://elest.io/open-source/flowiseai)

      [](https://elest.io/open-source/flowiseai)

    - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)

      [](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)

    - [RepoCloud](https://repocloud.io/details/?app_id=29)

      [](https://repocloud.io/details/?app_id=29)

  </details>

## 💻 클라우드 호스팅 서비스

곧 출시될 예정입니다.

## 🙋 기술 지원

질문, 버그 리포팅, 새로운 기능 요청 등은 [discussion](https://github.com/FlowiseAI/Flowise/discussions) 섹션에서 자유롭게 이야기 해주세요.

## 🙌 오픈소스 활동에 기여하기

다음과 같은 멋진 기여자들(contributors)에게 감사드립니다.

<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
</a>

[contributing guide](CONTRIBUTING.md)를 살펴보세요. 디스코드 [Discord](https://discord.gg/jbaHfsRVBW) 채널에서도 이슈나 질의응답을 진행하실 수 있습니다.
[](https://star-history.com/#FlowiseAI/Flowise&Date)

## 📄 라이센스

본 리포지토리의 소스코드는 [Apache License Version 2.0](LICENSE.md) 라이센스가 적용됩니다.
@@ -10,7 +10,7 @@
 [](https://star-history.com/#FlowiseAI/Flowise)
 [](https://github.com/FlowiseAI/Flowise/fork)
 
-[English](./README.md) | 中文
+[English](../README.md) | 中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md)
 
 <h3>拖放界面构建定制化的LLM流程</h3>
 <a href="https://github.com/FlowiseAI/Flowise">
@@ -44,9 +44,9 @@
 
 1. 进入项目根目录下的 `docker` 文件夹
 2. 创建 `.env` 文件并指定 `PORT`(参考 `.env.example`)
-3. 运行 `docker-compose up -d`
+3. 运行 `docker compose up -d`
 4. 打开 [http://localhost:3000](http://localhost:3000)
-5. 可以通过 `docker-compose stop` 停止容器
+5. 可以通过 `docker compose stop` 停止容器
 
 ### Docker 镜像
 
package.json (39 changed lines)
@@ -1,6 +1,6 @@
 {
     "name": "flowise",
-    "version": "1.6.0",
+    "version": "1.8.4",
     "private": true,
     "homepage": "https://flowiseai.com",
     "workspaces": [
@@ -52,16 +52,47 @@
 "turbo": "1.10.16",
 "typescript": "^4.8.4"
 },
+"packageManager": "pnpm@8.14.0",
+"pnpm": {
+    "onlyBuiltDependencies": [
+        "faiss-node",
+        "sqlite3"
+    ]
+},
 "engines": {
-    "node": ">=18.15.0 <19.0.0 || ^20"
+    "node": ">=18.15.0 <19.0.0 || ^20",
+    "pnpm": ">=9"
 },
 "resolutions": {
-    "@qdrant/openapi-typescript-fetch": "1.2.1"
+    "@qdrant/openapi-typescript-fetch": "1.2.1",
+    "@google/generative-ai": "^0.7.0",
+    "openai": "4.51.0"
 },
 "eslintIgnore": [
     "**/dist",
     "**/node_modules",
     "**/build",
     "**/package-lock.json"
 ],
+"prettier": {
+    "printWidth": 140,
+    "singleQuote": true,
+    "jsxSingleQuote": true,
+    "trailingComma": "none",
+    "tabWidth": 4,
+    "semi": false,
+    "endOfLine": "auto"
+},
+"babel": {
+    "presets": [
+        "@babel/preset-typescript",
+        [
+            "@babel/preset-env",
+            {
+                "targets": {
+                    "node": "current"
+                }
+            }
+        ]
+    ]
+}
 }
@@ -10,13 +10,8 @@ class AstraDBApi implements INodeCredential {
     constructor() {
         this.label = 'Astra DB API'
         this.name = 'AstraDBApi'
-        this.version = 1.0
+        this.version = 2.0
         this.inputs = [
-            {
-                label: 'Astra DB Collection Name',
-                name: 'collectionName',
-                type: 'string'
-            },
             {
                 label: 'Astra DB Application Token',
                 name: 'applicationToken',
@@ -0,0 +1,28 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class BaiduApi implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        this.label = 'Baidu API'
        this.name = 'baiduApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'Baidu Api Key',
                name: 'baiduApiKey',
                type: 'password'
            },
            {
                label: 'Baidu Secret Key',
                name: 'baiduSecretKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: BaiduApi }
@@ -0,0 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class ChatflowApi implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        this.label = 'Chatflow API'
        this.name = 'chatflowApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'Chatflow Api Key',
                name: 'chatflowApiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: ChatflowApi }
@@ -1,6 +1,6 @@
 import { INodeParams, INodeCredential } from '../src/Interface'
 
-class ConfluenceApi implements INodeCredential {
+class ConfluenceCloudApi implements INodeCredential {
     label: string
     name: string
     version: number
@@ -8,8 +8,8 @@ class ConfluenceApi implements INodeCredential {
     inputs: INodeParams[]
 
     constructor() {
-        this.label = 'Confluence API'
-        this.name = 'confluenceApi'
+        this.label = 'Confluence Cloud API'
+        this.name = 'confluenceCloudApi'
         this.version = 1.0
         this.description =
             'Refer to <a target="_blank" href="https://support.atlassian.com/confluence-cloud/docs/manage-oauth-access-tokens/">official guide</a> on how to get Access Token or <a target="_blank" href="https://id.atlassian.com/manage-profile/security/api-tokens">API Token</a> on Confluence'
@@ -30,4 +30,4 @@ class ConfluenceApi implements INodeCredential {
     }
 }
 
-module.exports = { credClass: ConfluenceApi }
+module.exports = { credClass: ConfluenceCloudApi }
@@ -0,0 +1,27 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class ConfluenceServerDCApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Confluence Server/Data Center API'
        this.name = 'confluenceServerDCApi'
        this.version = 1.0
        this.description =
            'Refer to <a target="_blank" href="https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html/">official guide</a> on how to get Personal Access Token</a> on Confluence'
        this.inputs = [
            {
                label: 'Personal Access Token',
                name: 'personalAccessToken',
                type: 'password',
                placeholder: '<CONFLUENCE_PERSONAL_ACCESS_TOKEN>'
            }
        ]
    }
}

module.exports = { credClass: ConfluenceServerDCApi }
@@ -0,0 +1,39 @@
/*
 * Temporary disabled due to the incompatibility with the docker node-alpine:
 * https://github.com/FlowiseAI/Flowise/pull/2303

import { INodeParams, INodeCredential } from '../src/Interface'

class CouchbaseApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Couchbase API'
        this.name = 'couchbaseApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'Couchbase Connection String',
                name: 'connectionString',
                type: 'string'
            },
            {
                label: 'Couchbase Username',
                name: 'username',
                type: 'string'
            },
            {
                label: 'Couchbase Password',
                name: 'password',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: CouchbaseApi }
*/
@@ -0,0 +1,26 @@
/*
 * TODO: Implement codeInterpreter column to chat_message table
import { INodeParams, INodeCredential } from '../src/Interface'

class E2BApi implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        this.label = 'E2B API'
        this.name = 'E2BApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'E2B Api Key',
                name: 'e2bApiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: E2BApi }
*/
@@ -0,0 +1,26 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class ExaSearchApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Exa Search API'
        this.name = 'exaSearchApi'
        this.version = 1.0
        this.description =
            'Refer to <a target="_blank" href="https://docs.exa.ai/reference/getting-started#getting-access">official guide</a> on how to get an API Key from Exa'
        this.inputs = [
            {
                label: 'ExaSearch Api Key',
                name: 'exaSearchApiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: ExaSearchApi }
@@ -0,0 +1,26 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class FireCrawlApiCredential implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'FireCrawl API'
        this.name = 'fireCrawlApi'
        this.version = 1.0
        this.description =
            'You can find the FireCrawl API token on your <a target="_blank" href="https://www.firecrawl.dev/">FireCrawl account</a> page.'
        this.inputs = [
            {
                label: 'FireCrawl API',
                name: 'firecrawlApiToken',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: FireCrawlApiCredential }
@@ -0,0 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class FireworksApi implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        this.label = 'Fireworks API'
        this.name = 'fireworksApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'Fireworks Api Key',
                name: 'fireworksApiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: FireworksApi }
@@ -0,0 +1,33 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class LangWatchApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'LangWatch API'
        this.name = 'langwatchApi'
        this.version = 1.0
        this.description =
            'Refer to <a target="_blank" href="https://docs.langwatch.ai/integration/python/guide">integration guide</a> on how to get API keys on LangWatch'
        this.inputs = [
            {
                label: 'API Key',
                name: 'langWatchApiKey',
                type: 'password',
                placeholder: '<LANGWATCH_API_KEY>'
            },
            {
                label: 'Endpoint',
                name: 'langWatchEndpoint',
                type: 'string',
                default: 'https://app.langwatch.ai'
            }
        ]
    }
}

module.exports = { credClass: LangWatchApi }
@@ -1,31 +0,0 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class MotorheadMemoryApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Motorhead Memory API'
        this.name = 'motorheadMemoryApi'
        this.version = 1.0
        this.description =
            'Refer to <a target="_blank" href="https://docs.getmetal.io/misc-get-keys">official guide</a> on how to create API key and Client ID on Motorhead Memory'
        this.inputs = [
            {
                label: 'Client ID',
                name: 'clientId',
                type: 'string'
            },
            {
                label: 'API Key',
                name: 'apiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: MotorheadMemoryApi }
@@ -0,0 +1,31 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class MySQLApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'MySQL API'
        this.name = 'MySQLApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'User',
                name: 'user',
                type: 'string',
                placeholder: '<MYSQL_USERNAME>'
            },
            {
                label: 'Password',
                name: 'password',
                type: 'password',
                placeholder: '<MYSQL_PASSWORD>'
            }
        ]
    }
}

module.exports = { credClass: MySQLApi }
@@ -0,0 +1,38 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class OpenSearchUrl implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'OpenSearch'
        this.name = 'openSearchUrl'
        this.version = 2.0
        this.inputs = [
            {
                label: 'OpenSearch Url',
                name: 'openSearchUrl',
                type: 'string'
            },
            {
                label: 'User',
                name: 'user',
                type: 'string',
                placeholder: '<OPENSEARCH_USERNAME>',
                optional: true
            },
            {
                label: 'Password',
                name: 'password',
                type: 'password',
                placeholder: '<OPENSEARCH_PASSWORD>',
                optional: true
            }
        ]
    }
}

module.exports = { credClass: OpenSearchUrl }
@@ -0,0 +1,25 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class SpiderApiCredential implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Spider API'
        this.name = 'spiderApi'
        this.version = 1.0
        this.description = 'Get your API key from the <a target="_blank" href="https://spider.cloud">Spider</a> dashboard.'
        this.inputs = [
            {
                label: 'Spider API Key',
                name: 'spiderApiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: SpiderApiCredential }
@@ -0,0 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class TogetherAIApi implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        this.label = 'TogetherAI API'
        this.name = 'togetherAIApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'TogetherAI Api Key',
                name: 'togetherAIApiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: TogetherAIApi }
@@ -0,0 +1,29 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class UpstashVectorApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Upstash Vector API'
        this.name = 'upstashVectorApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'Upstash Vector REST URL',
                name: 'UPSTASH_VECTOR_REST_URL',
                type: 'string'
            },
            {
                label: 'Upstash Vector REST Token',
                name: 'UPSTASH_VECTOR_REST_TOKEN',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: UpstashVectorApi }
@@ -0,0 +1,32 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class VoyageAIApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Voyage AI API'
        this.name = 'voyageAIApi'
        this.version = 1.0
        this.description =
            'Refer to <a target="_blank" href="https://docs.voyageai.com/install/#authentication-with-api-keys">official guide</a> on how to get an API Key'
        this.inputs = [
            {
                label: 'Voyage AI Endpoint',
                name: 'endpoint',
                type: 'string',
                default: 'https://api.voyageai.com/v1/embeddings'
            },
            {
                label: 'Voyage AI API Key',
                name: 'apiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: VoyageAIApi }
@@ -7,6 +7,7 @@ import { getBaseClasses } from '../../../src/utils'
import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { getFileFromStorage } from '../../../src'

class CSV_Agents implements INode {
    label: string
@@ -22,7 +23,7 @@ class CSV_Agents implements INode {
    constructor() {
        this.label = 'CSV Agent'
        this.name = 'csvAgent'
        this.version = 2.0
        this.version = 3.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'CSVagent.svg'
@@ -57,6 +58,16 @@ class CSV_Agents implements INode {
                type: 'Moderation',
                optional: true,
                list: true
            },
            {
                label: 'Custom Pandas Read_CSV Code',
                description:
                    'Custom Pandas <a target="_blank" href="https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html">read_csv</a> function. Takes in an input: "csv_data"',
                name: 'customReadCSV',
                default: 'read_csv(csv_data)',
                type: 'code',
                optional: true,
                additionalParams: true
            }
        ]
    }
@@ -71,6 +82,7 @@ class CSV_Agents implements INode {
        const model = nodeData.inputs?.model as BaseLanguageModel
        const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        const _customReadCSV = nodeData.inputs?.customReadCSV as string

        if (moderations && moderations.length > 0) {
            try {
@@ -88,19 +100,33 @@ class CSV_Agents implements INode {
        const callbacks = await additionalCallbacks(nodeData, options)

        let files: string[] = []

        if (csvFileBase64.startsWith('[') && csvFileBase64.endsWith(']')) {
            files = JSON.parse(csvFileBase64)
        } else {
            files = [csvFileBase64]
        }

        let base64String = ''

        for (const file of files) {
            const splitDataURI = file.split(',')
            splitDataURI.pop()
            base64String = splitDataURI.pop() ?? ''
        if (csvFileBase64.startsWith('FILE-STORAGE::')) {
            const fileName = csvFileBase64.replace('FILE-STORAGE::', '')
            if (fileName.startsWith('[') && fileName.endsWith(']')) {
                files = JSON.parse(fileName)
            } else {
                files = [fileName]
            }
            const chatflowid = options.chatflowid

            for (const file of files) {
                const fileData = await getFileFromStorage(file, chatflowid)
                base64String += fileData.toString('base64')
            }
        } else {
            if (csvFileBase64.startsWith('[') && csvFileBase64.endsWith(']')) {
                files = JSON.parse(csvFileBase64)
            } else {
                files = [csvFileBase64]
            }

            for (const file of files) {
                const splitDataURI = file.split(',')
                splitDataURI.pop()
                base64String += splitDataURI.pop() ?? ''
            }
        }

        const pyodide = await LoadPyodide()
@@ -108,6 +134,7 @@ class CSV_Agents implements INode {
        // First load the csv file and get the dataframe dictionary of column types
        // For example using titanic.csv: {'PassengerId': 'int64', 'Survived': 'int64', 'Pclass': 'int64', 'Name': 'object', 'Sex': 'object', 'Age': 'float64', 'SibSp': 'int64', 'Parch': 'int64', 'Ticket': 'object', 'Fare': 'float64', 'Cabin': 'object', 'Embarked': 'object'}
        let dataframeColDict = ''
        let customReadCSVFunc = _customReadCSV ? _customReadCSV : 'read_csv(csv_data)'
        try {
            const code = `import pandas as pd
import base64
@@ -120,7 +147,7 @@ decoded_data = base64.b64decode(base64_string)

csv_data = StringIO(decoded_data.decode('utf-8'))

df = pd.read_csv(csv_data)
df = pd.${customReadCSVFunc}
my_dict = df.dtypes.astype(str).to_dict()
print(my_dict)
json.dumps(my_dict)`
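Editor's note on the new customReadCSV input above: the value is interpolated verbatim into the Pyodide script as `df = pd.${customReadCSVFunc}`, so it must be an expression on the pd module that consumes the injected csv_data handle. A hedged example of a value a user might supply (the semicolon-delimited file is an assumption for illustration):

// Illustrative only: a custom value for the 'customReadCSV' input.
// Because the node executes `df = pd.${customReadCSVFunc}` in Pyodide,
// this expression becomes `df = pd.read_csv(csv_data, sep=';', header=0)`.
const customReadCSV = "read_csv(csv_data, sep=';', header=0)"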
@@ -9,7 +9,7 @@ import { RunnableSequence } from '@langchain/core/runnables'
import { ChatConversationalAgent } from 'langchain/agents'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { IVisionChatModal, FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
import { AgentExecutor } from '../../../src/agents'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
@@ -86,13 +86,20 @@ class ConversationalAgent_Agents implements INode {
                type: 'Moderation',
                optional: true,
                list: true
            },
            {
                label: 'Max Iterations',
                name: 'maxIterations',
                type: 'number',
                optional: true,
                additionalParams: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
@@ -109,23 +116,34 @@ class ConversationalAgent_Agents implements INode {
                return formatResponse(e.message)
            }
        }
        const executor = await prepareAgent(
            nodeData,
            options,
            { sessionId: this.sessionId, chatId: options.chatId, input },
            options.chatHistory
        )
        const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })

        const loggerHandler = new ConsoleCallbackHandler(options.logger)
        const callbacks = await additionalCallbacks(nodeData, options)

        let res: ChainValues = {}
        let sourceDocuments: ICommonObject[] = []
        let usedTools: IUsedTool[] = []

        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
            if (res.sourceDocuments) {
                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
                usedTools = res.usedTools
            }
        } else {
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
            if (res.sourceDocuments) {
                sourceDocuments = res.sourceDocuments
            }
            if (res.usedTools) {
                usedTools = res.usedTools
            }
        }

        await memory.addChatMessages(
@@ -142,23 +160,37 @@ class ConversationalAgent_Agents implements INode {
            this.sessionId
        )

        return res?.output
        let finalRes = res?.output

        if (sourceDocuments.length || usedTools.length) {
            finalRes = { text: res?.output }
            if (sourceDocuments.length) {
                finalRes.sourceDocuments = flatten(sourceDocuments)
            }
            if (usedTools.length) {
                finalRes.usedTools = usedTools
            }
            return finalRes
        }

        return finalRes
    }
}

const prepareAgent = async (
    nodeData: INodeData,
    options: ICommonObject,
    flowObj: { sessionId?: string; chatId?: string; input?: string },
    chatHistory: IMessage[] = []
    flowObj: { sessionId?: string; chatId?: string; input?: string }
) => {
    const model = nodeData.inputs?.model as BaseChatModel
    const maxIterations = nodeData.inputs?.maxIterations as string
    let tools = nodeData.inputs?.tools as Tool[]
    tools = flatten(tools)
    const memory = nodeData.inputs?.memory as FlowiseMemory
    const systemMessage = nodeData.inputs?.systemMessage as string
    const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
    const inputKey = memory.inputKey ? memory.inputKey : 'input'
    const prependMessages = options?.prependMessages

    const outputParser = ChatConversationalAgent.getDefaultOutputParser({
        llm: model,
@@ -172,7 +204,7 @@ const prepareAgent = async (

    if (llmSupportsVision(model)) {
        const visionChatModel = model as IVisionChatModal
        const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
        const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)

        if (messageContent?.length) {
            visionChatModel.setVisionModel()
@@ -209,7 +241,7 @@ const prepareAgent = async (
            [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
            agent_scratchpad: async (i: { input: string; steps: AgentStep[] }) => await constructScratchPad(i.steps),
            [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
                const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
                const messages = (await memory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
                return messages ?? []
            }
        },
@@ -224,7 +256,8 @@ const prepareAgent = async (
        sessionId: flowObj?.sessionId,
        chatId: flowObj?.chatId,
        input: flowObj?.input,
        verbose: process.env.DEBUG === 'true'
        verbose: process.env.DEBUG === 'true',
        maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
    })

    return executor
@@ -1,179 +0,0 @@
import { flatten } from 'lodash'
import { BaseMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI, formatToOpenAIFunction } from '@langchain/openai'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`

class ConversationalRetrievalAgent_Agents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    sessionId?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'Conversational Retrieval Agent'
        this.name = 'conversationalRetrievalAgent'
        this.version = 4.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'agent.svg'
        this.description = `An agent optimized for retrieval during conversation, answering questions based on past dialogue, all using OpenAI's Function Calling`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        this.inputs = [
            {
                label: 'Allowed Tools',
                name: 'tools',
                type: 'Tool',
                list: true
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'OpenAI/Azure Chat Model',
                name: 'model',
                type: 'BaseChatModel'
            },
            {
                label: 'System Message',
                name: 'systemMessage',
                type: 'string',
                default: defaultMessage,
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const moderations = nodeData.inputs?.inputModeration as Moderation[]

        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the agent
                input = await checkInputs(moderations, input)
            } catch (e) {
                await new Promise((resolve) => setTimeout(resolve, 500))
                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }

        const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)

        const loggerHandler = new ConsoleCallbackHandler(options.logger)
        const callbacks = await additionalCallbacks(nodeData, options)

        let res: ChainValues = {}

        if (options.socketIO && options.socketIOClientId) {
            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
        } else {
            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
        }

        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: res?.output,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )

        return res?.output
    }
}

const prepareAgent = (
    nodeData: INodeData,
    flowObj: { sessionId?: string; chatId?: string; input?: string },
    chatHistory: IMessage[] = []
) => {
    const model = nodeData.inputs?.model as ChatOpenAI
    const memory = nodeData.inputs?.memory as FlowiseMemory
    const systemMessage = nodeData.inputs?.systemMessage as string
    let tools = nodeData.inputs?.tools
    tools = flatten(tools)
    const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
    const inputKey = memory.inputKey ? memory.inputKey : 'input'

    const prompt = ChatPromptTemplate.fromMessages([
        ['ai', systemMessage ? systemMessage : defaultMessage],
        new MessagesPlaceholder(memoryKey),
        ['human', `{${inputKey}}`],
        new MessagesPlaceholder('agent_scratchpad')
    ])

    const modelWithFunctions = model.bind({
        functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))]
    })

    const runnableAgent = RunnableSequence.from([
        {
            [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
            agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
            [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
                const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
                return messages ?? []
            }
        },
        prompt,
        modelWithFunctions,
        new OpenAIFunctionsAgentOutputParser()
    ])

    const executor = AgentExecutor.fromAgentAndTools({
        agent: runnableAgent,
        tools,
        sessionId: flowObj?.sessionId,
        chatId: flowObj?.chatId,
        input: flowObj?.input,
        returnIntermediateSteps: true,
        verbose: process.env.DEBUG === 'true' ? true : false
    })

    return executor
}

module.exports = { nodeClass: ConversationalRetrievalAgent_Agents }
@@ -0,0 +1 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><circle cx="16" cy="16" r="14" fill="#CC9B7A"/><path d="m10 21 4.5-10L19 21m-7.2-2.857h5.4M18.5 11 23 21" stroke="#1F1F1E" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>

After: Size 269 B
@@ -0,0 +1,142 @@
import { flatten } from 'lodash'
import { MessageContentTextDetail, ChatMessage, AnthropicAgent, Anthropic } from 'llamaindex'
import { getBaseClasses } from '../../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../../src/Interface'

class AnthropicAgent_LlamaIndex_Agents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    sessionId?: string
    badge?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'Anthropic Agent'
        this.name = 'anthropicAgentLlamaIndex'
        this.version = 1.0
        this.type = 'AnthropicAgent'
        this.category = 'Agents'
        this.icon = 'Anthropic.svg'
        this.description = `Agent that uses Anthropic Claude Function Calling to pick the tools and args to call using LlamaIndex`
        this.baseClasses = [this.type, ...getBaseClasses(AnthropicAgent)]
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Tools',
                name: 'tools',
                type: 'Tool_LlamaIndex',
                list: true
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'Anthropic Claude Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'System Message',
                name: 'systemMessage',
                type: 'string',
                rows: 4,
                optional: true,
                additionalParams: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(): Promise<any> {
        return null
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const model = nodeData.inputs?.model as Anthropic
        const systemMessage = nodeData.inputs?.systemMessage as string
        const prependMessages = options?.prependMessages

        let tools = nodeData.inputs?.tools
        tools = flatten(tools)

        const chatHistory = [] as ChatMessage[]

        if (systemMessage) {
            chatHistory.push({
                content: systemMessage,
                role: 'system'
            })
        }

        const msgs = (await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]
        for (const message of msgs) {
            if (message.type === 'apiMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'assistant'
                })
            } else if (message.type === 'userMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'user'
                })
            }
        }

        const agent = new AnthropicAgent({
            tools,
            llm: model,
            chatHistory: chatHistory,
            verbose: process.env.DEBUG === 'true' ? true : false
        })

        let text = ''
        const usedTools: IUsedTool[] = []

        const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' ? true : false })

        if (response.sources.length) {
            for (const sourceTool of response.sources) {
                usedTools.push({
                    tool: sourceTool.tool?.metadata.name ?? '',
                    toolInput: sourceTool.input,
                    toolOutput: sourceTool.output as any
                })
            }
        }

        if (Array.isArray(response.response.message.content) && response.response.message.content.length > 0) {
            text = (response.response.message.content[0] as MessageContentTextDetail).text
        } else {
            text = response.response.message.content as string
        }

        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: text,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )

        return usedTools.length ? { text: text, usedTools } : text
    }
}

module.exports = { nodeClass: AnthropicAgent_LlamaIndex_Agents }
@@ -0,0 +1,167 @@
import { flatten } from 'lodash'
import { ChatMessage, OpenAI, OpenAIAgent } from 'llamaindex'
import { getBaseClasses } from '../../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../../src/Interface'

class OpenAIFunctionAgent_LlamaIndex_Agents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    sessionId?: string
    badge?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'OpenAI Tool Agent'
        this.name = 'openAIToolAgentLlamaIndex'
        this.version = 2.0
        this.type = 'OpenAIToolAgent'
        this.category = 'Agents'
        this.icon = 'function.svg'
        this.description = `Agent that uses OpenAI Function Calling to pick the tools and args to call using LlamaIndex`
        this.baseClasses = [this.type, ...getBaseClasses(OpenAIAgent)]
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Tools',
                name: 'tools',
                type: 'Tool_LlamaIndex',
                list: true
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'OpenAI/Azure Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'System Message',
                name: 'systemMessage',
                type: 'string',
                rows: 4,
                optional: true,
                additionalParams: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(): Promise<any> {
        return null
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const model = nodeData.inputs?.model as OpenAI
        const systemMessage = nodeData.inputs?.systemMessage as string
        let tools = nodeData.inputs?.tools
        tools = flatten(tools)

        const isStreamingEnabled = options.socketIO && options.socketIOClientId

        const chatHistory = [] as ChatMessage[]

        if (systemMessage) {
            chatHistory.push({
                content: systemMessage,
                role: 'system'
            })
        }

        const msgs = (await memory.getChatMessages(this.sessionId, false)) as IMessage[]
        for (const message of msgs) {
            if (message.type === 'apiMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'assistant'
                })
            } else if (message.type === 'userMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'user'
                })
            }
        }

        const agent = new OpenAIAgent({
            tools,
            llm: model,
            chatHistory: chatHistory,
            verbose: process.env.DEBUG === 'true' ? true : false
        })

        let text = ''
        let isStreamingStarted = false
        const usedTools: IUsedTool[] = []

        if (isStreamingEnabled) {
            const stream = await agent.chat({
                message: input,
                chatHistory,
                stream: true,
                verbose: process.env.DEBUG === 'true' ? true : false
            })
            for await (const chunk of stream) {
                //console.log('chunk', chunk)
                text += chunk.response.delta
                if (!isStreamingStarted) {
                    isStreamingStarted = true
                    options.socketIO.to(options.socketIOClientId).emit('start', chunk.response.delta)
                    if (chunk.sources.length) {
                        for (const sourceTool of chunk.sources) {
                            usedTools.push({
                                tool: sourceTool.tool?.metadata.name ?? '',
                                toolInput: sourceTool.input,
                                toolOutput: sourceTool.output as any
                            })
                        }
                        options.socketIO.to(options.socketIOClientId).emit('usedTools', usedTools)
                    }
                }

                options.socketIO.to(options.socketIOClientId).emit('token', chunk.response.delta)
            }
        } else {
            const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' ? true : false })
            if (response.sources.length) {
                for (const sourceTool of response.sources) {
                    usedTools.push({
                        tool: sourceTool.tool?.metadata.name ?? '',
                        toolInput: sourceTool.input,
                        toolOutput: sourceTool.output as any
                    })
                }
            }

            text = response.response.message.content as string
        }

        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: text,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )

        return usedTools.length ? { text: text, usedTools } : text
    }
}

module.exports = { nodeClass: OpenAIFunctionAgent_LlamaIndex_Agents }

Before: Size 2.3 KiB | After: Size 2.3 KiB
@@ -1,7 +0,0 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10 6C10 5.44772 10.4477 5 11 5H21C21.5523 5 22 5.44772 22 6V11C22 13.2091 20.2091 15 18 15H14C11.7909 15 10 13.2091 10 11V6Z" stroke="black" stroke-width="2" stroke-linejoin="round"/>
<path d="M16 5V3" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<circle cx="14" cy="9" r="1.5" fill="black"/>
<circle cx="18" cy="9" r="1.5" fill="black"/>
<path d="M26 27C26 22.0294 21.5228 18 16 18C10.4772 18 6 22.0294 6 27" stroke="black" stroke-width="2" stroke-linecap="round"/>
</svg>

Before: Size 616 B
@@ -1,16 +1,17 @@
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOptionsValue, INodeParams, IUsedTool } from '../../../src/Interface'
import OpenAI from 'openai'
import { DataSource } from 'typeorm'
import { getCredentialData, getCredentialParam, getUserHome } from '../../../src/utils'
import { MessageContentImageFile, MessageContentText } from 'openai/resources/beta/threads/messages/messages'
import * as fsDefault from 'node:fs'
import * as path from 'node:path'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
import fetch from 'node-fetch'
import { flatten, uniqWith, isEqual } from 'lodash'
import { zodToJsonSchema } from 'zod-to-json-schema'
import { AnalyticHandler } from '../../../src/handler'
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { addSingleFileToStorage } from '../../../src/storageUtils'

const lenticularBracketRegex = /【[^】]*】/g
const imageRegex = /<img[^>]*\/>/g

class OpenAIAssistant_Agents implements INode {
    label: string
@@ -26,7 +27,7 @@ class OpenAIAssistant_Agents implements INode {
    constructor() {
        this.label = 'OpenAI Assistant'
        this.name = 'openAIAssistant'
        this.version = 3.0
        this.version = 4.0
        this.type = 'OpenAIAssistant'
        this.category = 'Agents'
        this.icon = 'assistant.svg'
@@ -53,6 +54,25 @@ class OpenAIAssistant_Agents implements INode {
                optional: true,
                list: true
            },
            {
                label: 'Tool Choice',
                name: 'toolChoice',
                type: 'string',
                description:
                    'Controls which (if any) tool is called by the model. Can be "none", "auto", "required", or the name of a tool. Refer <a href="https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-tool_choice" target="_blank">here</a> for more information',
                placeholder: 'file_search',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Parallel Tool Calls',
                name: 'parallelToolCalls',
                type: 'boolean',
                description: 'Whether to enable parallel function calling during tool use. Defaults to true',
                default: true,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Disable File Download',
                name: 'disableFileDownload',
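Editor's note: the Tool Choice string added above is translated further down in this diff into the tool_choice payload sent to the Runs API. The sketch below simply restates that mapping for quick reference; it is lifted from the diff's own toolChoice branch, not an addition to the API.

// Restating the mapping implemented later in this diff: the raw
// 'toolChoice' string becomes the tool_choice payload for the run.
const mapToolChoice = (raw: string): any => {
    if (raw === 'file_search') return { type: 'file_search' }
    if (raw === 'code_interpreter') return { type: 'code_interpreter' }
    if (raw === 'none' || raw === 'auto' || raw === 'required') return raw
    // Any other value is treated as the name of a function tool
    return { type: 'function', function: { name: raw } }
}

// e.g. mapToolChoice('file_search') -> { type: 'file_search' }
//      mapToolChoice('my_tool')     -> { type: 'function', function: { name: 'my_tool' } }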
@@ -137,10 +157,14 @@ class OpenAIAssistant_Agents implements INode {
            const openai = new OpenAI({ apiKey: openAIApiKey })
            options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
            try {
                if (sessionId) await openai.beta.threads.del(sessionId)
                options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
                if (sessionId && sessionId.startsWith('thread_')) {
                    await openai.beta.threads.del(sessionId)
                    options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
                } else {
                    options.logger.error(`Error clearing OpenAI Thread ${sessionId}`)
                }
            } catch (e) {
                throw new Error(e)
                options.logger.error(`Error clearing OpenAI Thread ${sessionId}`)
            }
        }

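Editor's note on the guard introduced above: OpenAI thread IDs carry a 'thread_' prefix (e.g. 'thread_abc123' is an assumed illustrative value), so the new startsWith check appears intended to skip deletion when the stored sessionId is some other, non-thread identifier instead of throwing.

// Illustrative restatement of the guard's intent; not repository code.
const looksLikeOpenAIThread = (sessionId: string): boolean => sessionId.startsWith('thread_')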
@@ -150,6 +174,8 @@ class OpenAIAssistant_Agents implements INode {
        const databaseEntities = options.databaseEntities as IDatabaseEntity
        const disableFileDownload = nodeData.inputs?.disableFileDownload as boolean
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        const _toolChoice = nodeData.inputs?.toolChoice as string
        const parallelToolCalls = nodeData.inputs?.parallelToolCalls as boolean
        const isStreaming = options.socketIO && options.socketIOClientId
        const socketIO = isStreaming ? options.socketIO : undefined
        const socketIOClientId = isStreaming ? options.socketIOClientId : ''
@@ -168,6 +194,9 @@ class OpenAIAssistant_Agents implements INode {
        tools = flatten(tools)
        const formattedTools = tools?.map((tool: any) => formatToOpenAIAssistantTool(tool)) ?? []

        const usedTools: IUsedTool[] = []
        const fileAnnotations = []

        const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
            id: selectedAssistantId
        })
@@ -195,7 +224,7 @@ class OpenAIAssistant_Agents implements INode {
        if (formattedTools.length) {
            let filteredTools = []
            for (const tool of retrievedAssistant.tools) {
                if (tool.type === 'code_interpreter' || tool.type === 'retrieval') filteredTools.push(tool)
                if (tool.type === 'code_interpreter' || tool.type === 'file_search') filteredTools.push(tool)
            }
            filteredTools = uniqWith([...filteredTools, ...formattedTools], isEqual)
            // filter out tools with an empty function
@@ -236,7 +265,8 @@ class OpenAIAssistant_Agents implements INode {
                    (runStatus === 'cancelled' ||
                        runStatus === 'completed' ||
                        runStatus === 'expired' ||
                        runStatus === 'failed')
                        runStatus === 'failed' ||
                        runStatus === 'requires_action')
                ) {
                    clearInterval(timeout)
                    resolve()
@@ -259,11 +289,256 @@ class OpenAIAssistant_Agents implements INode {

        // Run assistant thread
        const llmIds = await analyticHandlers.onLLMStart('ChatOpenAI', input, parentIds)
        const runThread = await openai.beta.threads.runs.create(threadId, {
            assistant_id: retrievedAssistant.id
        })

        const usedTools: IUsedTool[] = []
        let text = ''
        let runThreadId = ''
        let isStreamingStarted = false

        let toolChoice: any
        if (_toolChoice) {
            if (_toolChoice === 'file_search') {
                toolChoice = { type: 'file_search' }
            } else if (_toolChoice === 'code_interpreter') {
                toolChoice = { type: 'code_interpreter' }
            } else if (_toolChoice === 'none' || _toolChoice === 'auto' || _toolChoice === 'required') {
                toolChoice = _toolChoice
            } else {
                toolChoice = { type: 'function', function: { name: _toolChoice } }
            }
        }

        if (isStreaming) {
            const streamThread = await openai.beta.threads.runs.create(threadId, {
                assistant_id: retrievedAssistant.id,
                stream: true,
                tool_choice: toolChoice,
                parallel_tool_calls: parallelToolCalls
            })

            for await (const event of streamThread) {
                if (event.event === 'thread.run.created') {
                    runThreadId = event.data.id
                }

                if (event.event === 'thread.message.delta') {
                    const chunk = event.data.delta.content?.[0]

                    if (chunk && 'text' in chunk) {
                        if (chunk.text?.annotations?.length) {
                            const message_content = chunk.text
                            const annotations = chunk.text?.annotations

                            // Iterate over the annotations
                            for (let index = 0; index < annotations.length; index++) {
                                const annotation = annotations[index]
                                let filePath = ''

                                // Gather citations based on annotation attributes
                                const file_citation = (annotation as OpenAI.Beta.Threads.Messages.FileCitationAnnotation).file_citation
                                if (file_citation) {
                                    const cited_file = await openai.files.retrieve(file_citation.file_id)
                                    // eslint-disable-next-line no-useless-escape
                                    const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
                                    if (!disableFileDownload) {
                                        filePath = await downloadFile(
                                            openAIApiKey,
                                            cited_file,
                                            fileName,
                                            options.chatflowid,
                                            options.chatId
                                        )
                                        fileAnnotations.push({
                                            filePath,
                                            fileName
                                        })
                                    }
                                } else {
                                    const file_path = (annotation as OpenAI.Beta.Threads.Messages.FilePathAnnotation).file_path
                                    if (file_path) {
                                        const cited_file = await openai.files.retrieve(file_path.file_id)
                                        // eslint-disable-next-line no-useless-escape
                                        const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
                                        if (!disableFileDownload) {
                                            filePath = await downloadFile(
                                                openAIApiKey,
                                                cited_file,
                                                fileName,
                                                options.chatflowid,
                                                options.chatId
                                            )
                                            fileAnnotations.push({
                                                filePath,
                                                fileName
                                            })
                                        }
                                    }
                                }

                                // Replace the text with a footnote
                                message_content.value = message_content.value?.replace(
                                    `${annotation.text}`,
                                    `${disableFileDownload ? '' : filePath}`
                                )
                            }

                            // Remove lenticular brackets
                            message_content.value = message_content.value?.replace(lenticularBracketRegex, '')

                            text += message_content.value ?? ''

                            if (message_content.value) {
                                if (!isStreamingStarted) {
                                    isStreamingStarted = true
                                    socketIO.to(socketIOClientId).emit('start', message_content.value)
                                }
                                socketIO.to(socketIOClientId).emit('token', message_content.value)
                            }

                            if (fileAnnotations.length) {
                                if (!isStreamingStarted) {
                                    isStreamingStarted = true
                                    socketIO.to(socketIOClientId).emit('start', '')
                                }
                                socketIO.to(socketIOClientId).emit('fileAnnotations', fileAnnotations)
                            }
                        } else {
                            text += chunk.text?.value
                            if (!isStreamingStarted) {
                                isStreamingStarted = true
                                socketIO.to(socketIOClientId).emit('start', chunk.text?.value)
                            }

                            socketIO.to(socketIOClientId).emit('token', chunk.text?.value)
                        }
                    }

                    if (chunk && 'image_file' in chunk && chunk.image_file?.file_id) {
                        const fileId = chunk.image_file.file_id
                        const fileObj = await openai.files.retrieve(fileId)

                        const buffer = await downloadImg(openai, fileId, `${fileObj.filename}.png`, options.chatflowid, options.chatId)
                        const base64String = Buffer.from(buffer).toString('base64')

                        // TODO: Use a file path and retrieve image on the fly. Storing as base64 to localStorage and database will easily hit limits
                        const imgHTML = `<img src="data:image/png;base64,${base64String}" width="100%" height="max-content" alt="${fileObj.filename}" /><br/>`
                        text += imgHTML

                        if (!isStreamingStarted) {
                            isStreamingStarted = true
                            socketIO.to(socketIOClientId).emit('start', imgHTML)
                        }

                        socketIO.to(socketIOClientId).emit('token', imgHTML)
                    }
                }

                if (event.event === 'thread.run.requires_action') {
                    if (event.data.required_action?.submit_tool_outputs.tool_calls) {
                        const actions: ICommonObject[] = []
                        event.data.required_action.submit_tool_outputs.tool_calls.forEach((item) => {
                            const functionCall = item.function
                            let args = {}
                            try {
                                args = JSON.parse(functionCall.arguments)
                            } catch (e) {
                                console.error('Error parsing arguments, default to empty object')
                            }
                            actions.push({
                                tool: functionCall.name,
                                toolInput: args,
                                toolCallId: item.id
                            })
                        })

                        const submitToolOutputs = []
                        for (let i = 0; i < actions.length; i += 1) {
                            const tool = tools.find((tool: any) => tool.name === actions[i].tool)
                            if (!tool) continue

                            // Start tool analytics
                            const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds)

                            try {
                                const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
                                    sessionId: threadId,
                                    chatId: options.chatId,
                                    input
                                })
                                await analyticHandlers.onToolEnd(toolIds, toolOutput)
                                submitToolOutputs.push({
                                    tool_call_id: actions[i].toolCallId,
                                    output: toolOutput
                                })
                                usedTools.push({
                                    tool: tool.name,
                                    toolInput: actions[i].toolInput,
                                    toolOutput
                                })
                            } catch (e) {
                                await analyticHandlers.onToolEnd(toolIds, e)
                                console.error('Error executing tool', e)
                                throw new Error(
                                    `Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`
                                )
                            }
                        }

                        try {
                            const stream = openai.beta.threads.runs.submitToolOutputsStream(threadId, runThreadId, {
                                tool_outputs: submitToolOutputs
                            })

                            for await (const event of stream) {
                                if (event.event === 'thread.message.delta') {
                                    const chunk = event.data.delta.content?.[0]
                                    if (chunk && 'text' in chunk && chunk.text?.value) {
                                        text += chunk.text.value
                                        if (!isStreamingStarted) {
                                            isStreamingStarted = true
                                            socketIO.to(socketIOClientId).emit('start', chunk.text.value)
                                        }

                                        socketIO.to(socketIOClientId).emit('token', chunk.text.value)
                                    }
                                }
                            }

                            socketIO.to(socketIOClientId).emit('usedTools', usedTools)
                        } catch (error) {
                            console.error('Error submitting tool outputs:', error)
                            await openai.beta.threads.runs.cancel(threadId, runThreadId)

                            const errMsg = `Error submitting tool outputs. Thread ID: ${threadId}. Run ID: ${runThreadId}`

                            await analyticHandlers.onLLMError(llmIds, errMsg)
                            await analyticHandlers.onChainError(parentIds, errMsg, true)

                            throw new Error(errMsg)
                        }
                    }
                }
            }

            // List messages
            const messages = await openai.beta.threads.messages.list(threadId)
            const messageData = messages.data ?? []
            const assistantMessages = messageData.filter((msg) => msg.role === 'assistant')
            if (!assistantMessages.length) return ''

            // Remove images from the logging text
            let llmOutput = text.replace(imageRegex, '')
            llmOutput = llmOutput.replace('<br/>', '')

            await analyticHandlers.onLLMEnd(llmIds, llmOutput)
            await analyticHandlers.onChainEnd(parentIds, messageData, true)

            return {
                text,
                usedTools,
                fileAnnotations,
                assistant: { assistantId: openAIAssistantId, threadId, runId: runThreadId, messages: messageData }
            }
        }

        const promise = (threadId: string, runId: string) => {
            return new Promise((resolve, reject) => {
@@ -299,8 +574,7 @@ class OpenAIAssistant_Agents implements INode {

                        // Start tool analytics
                        const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds)
                        if (options.socketIO && options.socketIOClientId)
                            options.socketIO.to(options.socketIOClientId).emit('tool', tool.name)
                        if (socketIO && socketIOClientId) socketIO.to(socketIOClientId).emit('tool', tool.name)

                        try {
                            const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
@@ -360,7 +634,12 @@ class OpenAIAssistant_Agents implements INode {
        }

        // Polling run status
        let runThreadId = runThread.id
        const runThread = await openai.beta.threads.runs.create(threadId, {
            assistant_id: retrievedAssistant.id,
            tool_choice: toolChoice,
            parallel_tool_calls: parallelToolCalls
        })
        runThreadId = runThread.id
        let state = await promise(threadId, runThread.id)
        while (state === 'requires_action') {
            state = await promise(threadId, runThread.id)
@@ -371,7 +650,9 @@ class OpenAIAssistant_Agents implements INode {
            if (retries > 0) {
                retries -= 1
                const newRunThread = await openai.beta.threads.runs.create(threadId, {
                    assistant_id: retrievedAssistant.id
                    assistant_id: retrievedAssistant.id,
                    tool_choice: toolChoice,
                    parallel_tool_calls: parallelToolCalls
                })
                runThreadId = newRunThread.id
                state = await promise(threadId, newRunThread.id)
@@ -389,46 +670,47 @@ class OpenAIAssistant_Agents implements INode {
        if (!assistantMessages.length) return ''

        let returnVal = ''
        const fileAnnotations = []
        for (let i = 0; i < assistantMessages[0].content.length; i += 1) {
            if (assistantMessages[0].content[i].type === 'text') {
                const content = assistantMessages[0].content[i] as MessageContentText
                const content = assistantMessages[0].content[i] as OpenAI.Beta.Threads.Messages.TextContentBlock

                if (content.text.annotations) {
                    const message_content = content.text
                    const annotations = message_content.annotations

                    const dirPath = path.join(getUserHome(), '.flowise', 'openai-assistant')

                    // Iterate over the annotations
                    for (let index = 0; index < annotations.length; index++) {
                        const annotation = annotations[index]
                        let filePath = ''

                        // Gather citations based on annotation attributes
                        const file_citation = (annotation as OpenAI.Beta.Threads.Messages.MessageContentText.Text.FileCitation)
                            .file_citation
                        const file_citation = (annotation as OpenAI.Beta.Threads.Messages.FileCitationAnnotation).file_citation

                        if (file_citation) {
                            const cited_file = await openai.files.retrieve(file_citation.file_id)
                            // eslint-disable-next-line no-useless-escape
                            const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
                            filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', fileName)
                            if (!disableFileDownload) {
                                await downloadFile(cited_file, filePath, dirPath, openAIApiKey)
                                filePath = await downloadFile(openAIApiKey, cited_file, fileName, options.chatflowid, options.chatId)
                                fileAnnotations.push({
                                    filePath,
                                    fileName
                                })
                            }
                        } else {
                            const file_path = (annotation as OpenAI.Beta.Threads.Messages.MessageContentText.Text.FilePath).file_path
                            const file_path = (annotation as OpenAI.Beta.Threads.Messages.FilePathAnnotation).file_path
                            if (file_path) {
                                const cited_file = await openai.files.retrieve(file_path.file_id)
                                // eslint-disable-next-line no-useless-escape
                                const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
                                filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', fileName)
                                if (!disableFileDownload) {
                                    await downloadFile(cited_file, filePath, dirPath, openAIApiKey)
                                    filePath = await downloadFile(
                                        openAIApiKey,
                                        cited_file,
                                        fileName,
                                        options.chatflowid,
                                        options.chatId
                                    )
                                    fileAnnotations.push({
                                        filePath,
                                        fileName
@@ -449,19 +731,14 @@ class OpenAIAssistant_Agents implements INode {
                    returnVal += content.text.value
                }

                const lenticularBracketRegex = /【[^】]*】/g
                returnVal = returnVal.replace(lenticularBracketRegex, '')
            } else {
                const content = assistantMessages[0].content[i] as MessageContentImageFile
                const content = assistantMessages[0].content[i] as OpenAI.Beta.Threads.Messages.ImageFileContentBlock
                const fileId = content.image_file.file_id
                const fileObj = await openai.files.retrieve(fileId)
                const dirPath = path.join(getUserHome(), '.flowise', 'openai-assistant')
                const filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', `${fileObj.filename}.png`)

                await downloadImg(openai, fileId, filePath, dirPath)

                const bitmap = fsDefault.readFileSync(filePath)
                const base64String = Buffer.from(bitmap).toString('base64')
                const buffer = await downloadImg(openai, fileId, `${fileObj.filename}.png`, options.chatflowid, options.chatId)
                const base64String = Buffer.from(buffer).toString('base64')

                // TODO: Use a file path and retrieve image on the fly. Storing as base64 to localStorage and database will easily hit limits
                const imgHTML = `<img src="data:image/png;base64,${base64String}" width="100%" height="max-content" alt="${fileObj.filename}" /><br/>`
@@ -469,7 +746,6 @@ class OpenAIAssistant_Agents implements INode {
            }
        }

        const imageRegex = /<img[^>]*\/>/g
        let llmOutput = returnVal.replace(imageRegex, '')
        llmOutput = llmOutput.replace('<br/>', '')

@@ -489,7 +765,7 @@ class OpenAIAssistant_Agents implements INode {
    }
}

const downloadImg = async (openai: OpenAI, fileId: string, filePath: string, dirPath: string) => {
const downloadImg = async (openai: OpenAI, fileId: string, fileName: string, ...paths: string[]) => {
    const response = await openai.files.content(fileId)

    // Extract the binary data from the Response object
@@ -497,15 +773,14 @@ const downloadImg = async (openai: OpenAI, fileId: string, filePath: string, dir

    // Convert the binary data to a Buffer
    const image_data_buffer = Buffer.from(image_data)
    const mime = 'image/png'

    // Save the image to a specific location
    if (!fsDefault.existsSync(dirPath)) {
        fsDefault.mkdirSync(path.dirname(filePath), { recursive: true })
    }
    fsDefault.writeFileSync(filePath, image_data_buffer)
    await addSingleFileToStorage(mime, image_data_buffer, fileName, ...paths)

    return image_data_buffer
}

const downloadFile = async (fileObj: any, filePath: string, dirPath: string, openAIApiKey: string) => {
const downloadFile = async (openAIApiKey: string, fileObj: any, fileName: string, ...paths: string[]) => {
    try {
        const response = await fetch(`https://api.openai.com/v1/files/${fileObj.id}/content`, {
            method: 'GET',
@ -516,24 +791,21 @@ const downloadFile = async (fileObj: any, filePath: string, dirPath: string, ope
|
|||
throw new Error(`HTTP error! status: ${response.status}`)
|
||||
}
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
if (!fsDefault.existsSync(dirPath)) {
|
||||
fsDefault.mkdirSync(path.dirname(filePath), { recursive: true })
|
||||
}
|
||||
const dest = fsDefault.createWriteStream(filePath)
|
||||
response.body.pipe(dest)
|
||||
response.body.on('end', () => resolve())
|
||||
dest.on('error', reject)
|
||||
})
|
||||
// Extract the binary data from the Response object
|
||||
const data = await response.arrayBuffer()
|
||||
|
||||
// eslint-disable-next-line no-console
|
||||
console.log('File downloaded and written to', filePath)
|
||||
// Convert the binary data to a Buffer
|
||||
const data_buffer = Buffer.from(data)
|
||||
const mime = 'application/octet-stream'
|
||||
|
||||
return await addSingleFileToStorage(mime, data_buffer, fileName, ...paths)
|
||||
} catch (error) {
|
||||
console.error('Error downloading or writing the file:', error)
|
||||
return ''
|
||||
}
|
||||
}
|
||||
|
||||
const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.AssistantCreateParams.AssistantToolsFunction => {
|
||||
const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.FunctionTool => {
|
||||
return {
|
||||
type: 'function',
|
||||
function: {
|
||||
|
|
|
|||
|
|
@ -1,183 +0,0 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { BaseMessage } from '@langchain/core/messages'
|
||||
import { ChainValues } from '@langchain/core/utils/types'
|
||||
import { AgentStep } from '@langchain/core/agents'
|
||||
import { RunnableSequence } from '@langchain/core/runnables'
|
||||
import { ChatOpenAI, formatToOpenAIFunction } from '@langchain/openai'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
|
||||
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
|
||||
import { Moderation, checkInputs } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class OpenAIFunctionAgent_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'OpenAI Function Agent'
|
||||
this.name = 'openAIFunctionAgent'
|
||||
this.version = 4.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'function.svg'
|
||||
this.description = `An agent that uses Function Calling to pick the tool and args to call`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Allowed Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'OpenAI/Azure Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
rows: 4,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the OpenAI Function Agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
let sourceDocuments: ICommonObject[] = []
|
||||
|
||||
if (options.socketIO && options.socketIOClientId) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: res?.output,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
return sourceDocuments.length ? { text: res?.output, sourceDocuments: flatten(sourceDocuments) } : res?.output
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = (
|
||||
nodeData: INodeData,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string },
|
||||
chatHistory: IMessage[] = []
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as ChatOpenAI
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
|
||||
new MessagesPlaceholder(memoryKey),
|
||||
['human', `{${inputKey}}`],
|
||||
new MessagesPlaceholder('agent_scratchpad')
|
||||
])
|
||||
|
||||
const modelWithFunctions = model.bind({
|
||||
functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))]
|
||||
})
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
|
||||
agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
|
||||
[memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
|
||||
return messages ?? []
|
||||
}
|
||||
},
|
||||
prompt,
|
||||
modelWithFunctions,
|
||||
new OpenAIFunctionsAgentOutputParser()
|
||||
])
|
||||
|
||||
const executor = AgentExecutor.fromAgentAndTools({
|
||||
agent: runnableAgent,
|
||||
tools,
|
||||
sessionId: flowObj?.sessionId,
|
||||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
})
|
||||
|
||||
return executor
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: OpenAIFunctionAgent_Agents }
|
||||
|
|
@ -13,7 +13,7 @@ import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalU
|
|||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class MRKLAgentChat_Agents implements INode {
|
||||
class ReActAgentChat_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
|
|
@ -27,7 +27,7 @@ class MRKLAgentChat_Agents implements INode {
|
|||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'ReAct Agent for Chat Models'
|
||||
this.name = 'mrklAgentChat'
|
||||
this.name = 'reactAgentChat'
|
||||
this.version = 4.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
|
|
@ -58,6 +58,13 @@ class MRKLAgentChat_Agents implements INode {
|
|||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
|
|
@ -69,9 +76,11 @@ class MRKLAgentChat_Agents implements INode {
|
|||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
let tools = nodeData.inputs?.tools as Tool[]
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
|
|
@ -90,7 +99,7 @@ class MRKLAgentChat_Agents implements INode {
|
|||
|
||||
if (llmSupportsVision(model)) {
|
||||
const visionChatModel = model as IVisionChatModal
|
||||
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
|
||||
if (messageContent?.length) {
|
||||
// Change model to vision supported
|
||||
|
|
@ -120,13 +129,13 @@ class MRKLAgentChat_Agents implements INode {
|
|||
const executor = new AgentExecutor({
|
||||
agent,
|
||||
tools,
|
||||
verbose: process.env.DEBUG === 'true'
|
||||
verbose: process.env.DEBUG === 'true',
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
const prevChatHistory = options.chatHistory
|
||||
const chatHistory = ((await memory.getChatMessages(this.sessionId, false, prevChatHistory)) as IMessage[]) ?? []
|
||||
const chatHistory = ((await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]) ?? []
|
||||
const chatHistoryString = chatHistory.map((hist) => hist.message).join('\\n')
|
||||
|
||||
const result = await executor.invoke({ input, chat_history: chatHistoryString }, { callbacks })
|
||||
|
|
@ -149,4 +158,4 @@ class MRKLAgentChat_Agents implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: MRKLAgentChat_Agents }
|
||||
module.exports = { nodeClass: ReActAgentChat_Agents }
|
||||
|
Before Width: | Height: | Size: 616 B After Width: | Height: | Size: 616 B |
|
|
@ -11,7 +11,7 @@ import { createReactAgent } from '../../../src/agents'
|
|||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class MRKLAgentLLM_Agents implements INode {
|
||||
class ReActAgentLLM_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
|
|
@ -24,7 +24,7 @@ class MRKLAgentLLM_Agents implements INode {
|
|||
|
||||
constructor() {
|
||||
this.label = 'ReAct Agent for LLMs'
|
||||
this.name = 'mrklAgentLLM'
|
||||
this.name = 'reactAgentLLM'
|
||||
this.version = 2.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
|
|
@ -50,6 +50,13 @@ class MRKLAgentLLM_Agents implements INode {
|
|||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -60,6 +67,7 @@ class MRKLAgentLLM_Agents implements INode {
|
|||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const model = nodeData.inputs?.model as BaseLanguageModel
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
let tools = nodeData.inputs?.tools as Tool[]
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
|
|
@ -87,7 +95,8 @@ class MRKLAgentLLM_Agents implements INode {
|
|||
const executor = new AgentExecutor({
|
||||
agent,
|
||||
tools,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
verbose: process.env.DEBUG === 'true' ? true : false,
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
|
@ -98,4 +107,4 @@ class MRKLAgentLLM_Agents implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: MRKLAgentLLM_Agents }
|
||||
module.exports = { nodeClass: ReActAgentLLM_Agents }
|
||||
|
Before Width: | Height: | Size: 616 B After Width: | Height: | Size: 616 B |
|
|
@ -0,0 +1,264 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { BaseMessage } from '@langchain/core/messages'
|
||||
import { ChainValues } from '@langchain/core/utils/types'
|
||||
import { RunnableSequence } from '@langchain/core/runnables'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
|
||||
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
|
||||
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool, IVisionChatModal } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents'
|
||||
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
|
||||
class ToolAgent_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
badge?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Tool Agent'
|
||||
this.name = 'toolAgent'
|
||||
this.version = 1.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'toolAgent.png'
|
||||
this.description = `Agent that uses Function Calling to pick the tools and args to call`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'Tool Calling Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel',
|
||||
description:
|
||||
'Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
default: `You are a helpful AI assistant.`,
|
||||
rows: 4,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
const isStreamable = options.socketIO && options.socketIOClientId
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the OpenAI Function Agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
if (isStreamable)
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
let sourceDocuments: ICommonObject[] = []
|
||||
let usedTools: IUsedTool[] = []
|
||||
|
||||
if (isStreamable) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
}
|
||||
|
||||
let output = res?.output as string
|
||||
|
||||
// Claude 3 Opus tends to spit out <thinking>..</thinking> as well, discard that in final output
|
||||
const regexPattern: RegExp = /<thinking>[\s\S]*?<\/thinking>/
|
||||
const matches: RegExpMatchArray | null = output.match(regexPattern)
|
||||
if (matches) {
|
||||
for (const match of matches) {
|
||||
output = output.replace(match, '')
|
||||
}
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: output,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
let finalRes = output
|
||||
|
||||
if (sourceDocuments.length || usedTools.length) {
|
||||
const finalRes: ICommonObject = { text: output }
|
||||
if (sourceDocuments.length) {
|
||||
finalRes.sourceDocuments = flatten(sourceDocuments)
|
||||
}
|
||||
if (usedTools.length) {
|
||||
finalRes.usedTools = usedTools
|
||||
}
|
||||
return finalRes
|
||||
}
|
||||
|
||||
return finalRes
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = async (
|
||||
nodeData: INodeData,
|
||||
options: ICommonObject,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string }
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemMessage],
|
||||
new MessagesPlaceholder(memoryKey),
|
||||
['human', `{${inputKey}}`],
|
||||
new MessagesPlaceholder('agent_scratchpad')
|
||||
])
|
||||
|
||||
if (llmSupportsVision(model)) {
|
||||
const visionChatModel = model as IVisionChatModal
|
||||
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
|
||||
if (messageContent?.length) {
|
||||
visionChatModel.setVisionModel()
|
||||
|
||||
// Pop the `agent_scratchpad` MessagePlaceHolder
|
||||
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
|
||||
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
|
||||
const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
|
||||
const template = (lastMessage.prompt as PromptTemplate).template as string
|
||||
const msg = HumanMessagePromptTemplate.fromTemplate([
|
||||
...messageContent,
|
||||
{
|
||||
text: template
|
||||
}
|
||||
])
|
||||
msg.inputVariables = lastMessage.inputVariables
|
||||
prompt.promptMessages.push(msg)
|
||||
}
|
||||
|
||||
// Add the `agent_scratchpad` MessagePlaceHolder back
|
||||
prompt.promptMessages.push(messagePlaceholder)
|
||||
} else {
|
||||
visionChatModel.revertToOriginalModel()
|
||||
}
|
||||
}
|
||||
|
||||
if (model.bindTools === undefined) {
|
||||
throw new Error(`This agent requires that the "bindTools()" method be implemented on the input model.`)
|
||||
}
|
||||
|
||||
const modelWithTools = model.bindTools(tools)
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
|
||||
agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(i.steps),
|
||||
[memoryKey]: async (_: { input: string; steps: ToolsAgentStep[] }) => {
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
|
||||
return messages ?? []
|
||||
}
|
||||
},
|
||||
prompt,
|
||||
modelWithTools,
|
||||
new ToolCallingAgentOutputParser()
|
||||
])
|
||||
|
||||
const executor = AgentExecutor.fromAgentAndTools({
|
||||
agent: runnableAgent,
|
||||
tools,
|
||||
sessionId: flowObj?.sessionId,
|
||||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false,
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
return executor
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ToolAgent_Agents }
|
||||
|
After Width: | Height: | Size: 17 KiB |
|
|
@ -5,12 +5,11 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
|||
import { RunnableSequence } from '@langchain/core/runnables'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
|
||||
import { XMLAgentOutputParser } from 'langchain/agents/xml/output_parser'
|
||||
import { formatLogToMessage } from 'langchain/agents/format_scratchpad/log_to_message'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { AgentExecutor } from '../../../src/agents'
|
||||
import { AgentExecutor, XMLAgentOutputParser } from '../../../src/agents'
|
||||
import { Moderation, checkInputs } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
|
|
@ -49,6 +48,7 @@ class XMLAgent_Agents implements INode {
|
|||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
badge?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'XML Agent'
|
||||
|
|
@ -92,6 +92,13 @@ class XMLAgent_Agents implements INode {
|
|||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
|
|
@ -115,13 +122,14 @@ class XMLAgent_Agents implements INode {
|
|||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
let sourceDocuments: ICommonObject[] = []
|
||||
let usedTools: IUsedTool[] = []
|
||||
|
||||
if (options.socketIO && options.socketIOClientId) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
|
|
@ -130,11 +138,18 @@ class XMLAgent_Agents implements INode {
|
|||
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
|
|
@ -151,22 +166,37 @@ class XMLAgent_Agents implements INode {
|
|||
this.sessionId
|
||||
)
|
||||
|
||||
return sourceDocuments.length ? { text: res?.output, sourceDocuments: flatten(sourceDocuments) } : res?.output
|
||||
let finalRes = res?.output
|
||||
|
||||
if (sourceDocuments.length || usedTools.length) {
|
||||
finalRes = { text: res?.output }
|
||||
if (sourceDocuments.length) {
|
||||
finalRes.sourceDocuments = flatten(sourceDocuments)
|
||||
}
|
||||
if (usedTools.length) {
|
||||
finalRes.usedTools = usedTools
|
||||
}
|
||||
return finalRes
|
||||
}
|
||||
|
||||
return finalRes
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = async (
|
||||
nodeData: INodeData,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string },
|
||||
chatHistory: IMessage[] = []
|
||||
options: ICommonObject,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string }
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
let promptMessage = systemMessage ? systemMessage : defaultSystemMessage
|
||||
if (memory.memoryKey) promptMessage = promptMessage.replaceAll('{chat_history}', `{${memory.memoryKey}}`)
|
||||
|
|
@ -185,7 +215,7 @@ const prepareAgent = async (
|
|||
|
||||
const llmWithStop = model.bind({ stop: ['</tool_input>', '</final_answer>'] })
|
||||
|
||||
const messages = (await memory.getChatMessages(flowObj.sessionId, false, chatHistory)) as IMessage[]
|
||||
const messages = (await memory.getChatMessages(flowObj.sessionId, false, prependMessages)) as IMessage[]
|
||||
let chatHistoryMsgTxt = ''
|
||||
for (const message of messages) {
|
||||
if (message.type === 'apiMessage') {
|
||||
|
|
@ -215,7 +245,8 @@ const prepareAgent = async (
|
|||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
isXML: true,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
verbose: process.env.DEBUG === 'true' ? true : false,
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
return executor
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
<svg width="38" height="52" viewBox="0 0 38 52" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M0 12.383V41.035C0 41.392 0.190002 41.723 0.500002 41.901L17.095 51.481C17.25 51.571 17.422 51.616 17.595 51.616C17.768 51.616 17.94 51.571 18.095 51.481L37.279 40.409C37.589 40.23 37.779 39.9 37.779 39.543V10.887C37.779 10.53 37.589 10.199 37.279 10.021L31.168 6.49498C31.014 6.40598 30.841 6.36098 30.669 6.36098C30.496 6.36098 30.323 6.40498 30.169 6.49498L27.295 8.15398V4.83698C27.295 4.47998 27.105 4.14898 26.795 3.97098L20.684 0.441982C20.529 0.352982 20.357 0.307983 20.184 0.307983C20.011 0.307983 19.839 0.352982 19.684 0.441982L13.781 3.85098C13.471 4.02998 13.281 4.35998 13.281 4.71698V12.157L12.921 12.365V11.872C12.921 11.515 12.731 11.185 12.421 11.006L7.405 8.10698C7.25 8.01798 7.077 7.97298 6.905 7.97298C6.733 7.97298 6.56 8.01798 6.405 8.10698L0.501001 11.517C0.191001 11.695 0 12.025 0 12.383ZM1.5 13.248L5.519 15.566V23.294C5.519 23.304 5.524 23.313 5.525 23.323C5.526 23.345 5.529 23.366 5.534 23.388C5.538 23.411 5.544 23.433 5.552 23.455C5.559 23.476 5.567 23.496 5.577 23.516C5.582 23.525 5.581 23.535 5.587 23.544C5.591 23.551 5.6 23.554 5.604 23.561C5.617 23.581 5.63 23.6 5.646 23.618C5.669 23.644 5.695 23.665 5.724 23.686C5.741 23.698 5.751 23.716 5.77 23.727L11.236 26.886C11.243 26.89 11.252 26.888 11.26 26.892C11.328 26.927 11.402 26.952 11.484 26.952C11.566 26.952 11.641 26.928 11.709 26.893C11.728 26.883 11.743 26.87 11.761 26.858C11.812 26.823 11.855 26.781 11.89 26.731C11.898 26.719 11.911 26.715 11.919 26.702C11.924 26.693 11.924 26.682 11.929 26.674C11.944 26.644 11.951 26.613 11.96 26.58C11.969 26.547 11.978 26.515 11.98 26.481C11.98 26.471 11.986 26.462 11.986 26.452V20.138V19.302L17.096 22.251V49.749L1.5 40.747V13.248ZM35.778 10.887L30.879 13.718L25.768 10.766L26.544 10.317L30.668 7.93698L35.778 10.887ZM25.293 4.83598L20.391 7.66498L15.281 4.71598L20.183 1.88398L25.293 4.83598ZM10.92 11.872L6.019 14.701L2.001 12.383L6.904 9.55098L10.92 11.872ZM20.956 16.51L24.268 14.601V18.788C24.268 18.809 24.278 18.827 24.28 18.848C24.284 18.883 24.29 18.917 24.301 18.95C24.311 18.98 24.325 19.007 24.342 19.034C24.358 19.061 24.373 19.088 24.395 19.112C24.417 19.138 24.444 19.159 24.471 19.18C24.489 19.193 24.499 19.21 24.518 19.221L29.878 22.314L23.998 25.708V18.557C23.998 18.547 23.993 18.538 23.992 18.528C23.991 18.506 23.988 18.485 23.984 18.463C23.979 18.44 23.973 18.418 23.965 18.396C23.958 18.375 23.95 18.355 23.941 18.336C23.936 18.327 23.937 18.316 23.931 18.308C23.925 18.299 23.917 18.294 23.911 18.286C23.898 18.267 23.886 18.251 23.871 18.234C23.855 18.216 23.84 18.2 23.822 18.185C23.805 18.17 23.788 18.157 23.769 18.144C23.76 18.138 23.756 18.129 23.747 18.124L20.956 16.51ZM25.268 11.633L30.379 14.585V21.448L25.268 18.499V13.736V11.633ZM12.486 18.437L17.389 15.604L22.498 18.556L17.595 21.385L12.486 18.437ZM10.985 25.587L7.019 23.295L10.985 21.005V25.587ZM12.42 14.385L14.28 13.311L16.822 14.777L12.42 17.32V14.385ZM14.78 5.58198L19.891 8.53098V15.394L14.78 12.445V5.58198Z" fill="#213B41"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 3.0 KiB |
|
|
@ -0,0 +1,33 @@
|
|||
import { INode, INodeParams } from '../../../src/Interface'
|
||||
|
||||
class LangWatch_Analytic implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs?: INodeParams[]
|
||||
credential: INodeParams
|
||||
|
||||
constructor() {
|
||||
this.label = 'LangWatch'
|
||||
this.name = 'LangWatch'
|
||||
this.version = 1.0
|
||||
this.type = 'LangWatch'
|
||||
this.icon = 'LangWatch.svg'
|
||||
this.category = 'Analytic'
|
||||
this.baseClasses = [this.type]
|
||||
this.inputs = []
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['langwatchApi']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: LangWatch_Analytic }
|
||||
|
|
@ -5,6 +5,7 @@ import { getBaseClasses } from '../../../src/utils'
|
|||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { getFileFromStorage } from '../../../src'
|
||||
|
||||
class OpenApiChain_Chains implements INode {
|
||||
label: string
|
||||
|
|
@ -64,12 +65,12 @@ class OpenApiChain_Chains implements INode {
|
|||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData): Promise<any> {
|
||||
return await initChain(nodeData)
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
return await initChain(nodeData, options)
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const chain = await initChain(nodeData)
|
||||
const chain = await initChain(nodeData, options)
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
|
@ -94,7 +95,7 @@ class OpenApiChain_Chains implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const initChain = async (nodeData: INodeData) => {
|
||||
const initChain = async (nodeData: INodeData, options: ICommonObject) => {
|
||||
const model = nodeData.inputs?.model as ChatOpenAI
|
||||
const headers = nodeData.inputs?.headers as string
|
||||
const yamlLink = nodeData.inputs?.yamlLink as string
|
||||
|
|
@ -105,10 +106,17 @@ const initChain = async (nodeData: INodeData) => {
|
|||
if (yamlLink) {
|
||||
yamlString = yamlLink
|
||||
} else {
|
||||
const splitDataURI = yamlFileBase64.split(',')
|
||||
splitDataURI.pop()
|
||||
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
|
||||
yamlString = bf.toString('utf-8')
|
||||
if (yamlFileBase64.startsWith('FILE-STORAGE::')) {
|
||||
const file = yamlFileBase64.replace('FILE-STORAGE::', '')
|
||||
const chatflowid = options.chatflowid
|
||||
const fileData = await getFileFromStorage(file, chatflowid)
|
||||
yamlString = fileData.toString()
|
||||
} else {
|
||||
const splitDataURI = yamlFileBase64.split(',')
|
||||
splitDataURI.pop()
|
||||
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
|
||||
yamlString = bf.toString('utf-8')
|
||||
}
|
||||
}
|
||||
|
||||
return await createOpenAPIChain(yamlString, {
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ class ConversationChain_Chains implements INode {
|
|||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const memory = nodeData.inputs?.memory
|
||||
|
||||
const chain = prepareChain(nodeData, options, this.sessionId)
|
||||
const chain = await prepareChain(nodeData, options, this.sessionId)
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
|
|
@ -216,15 +216,15 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageConte
|
|||
return chatPrompt
|
||||
}
|
||||
|
||||
const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
|
||||
const chatHistory = options.chatHistory
|
||||
const prepareChain = async (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
|
||||
let model = nodeData.inputs?.model as BaseChatModel
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const memoryKey = memory.memoryKey ?? 'chat_history'
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
let messageContent: MessageContentImageUrl[] = []
|
||||
if (llmSupportsVision(model)) {
|
||||
messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
const visionChatModel = model as IVisionChatModal
|
||||
if (messageContent?.length) {
|
||||
visionChatModel.setVisionModel()
|
||||
|
|
@ -253,7 +253,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
|
|||
{
|
||||
[inputKey]: (input: { input: string }) => input.input,
|
||||
[memoryKey]: async () => {
|
||||
const history = await memory.getChatMessages(sessionId, true, chatHistory)
|
||||
const history = await memory.getChatMessages(sessionId, true, prependMessages)
|
||||
return history
|
||||
},
|
||||
...promptVariables
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import { applyPatch } from 'fast-json-patch'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { BaseRetriever } from '@langchain/core/retrievers'
|
||||
import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
|
||||
|
|
@ -11,9 +12,18 @@ import { StringOutputParser } from '@langchain/core/output_parsers'
|
|||
import type { Document } from '@langchain/core/documents'
|
||||
import { BufferMemoryInput } from 'langchain/memory'
|
||||
import { ConversationalRetrievalQAChain } from 'langchain/chains'
|
||||
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
|
||||
import { getBaseClasses, mapChatMessageToBaseMessage } from '../../../src/utils'
|
||||
import { ConsoleCallbackHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
|
||||
import {
|
||||
FlowiseMemory,
|
||||
ICommonObject,
|
||||
IMessage,
|
||||
INode,
|
||||
INodeData,
|
||||
INodeParams,
|
||||
IDatabaseEntity,
|
||||
MemoryMethods
|
||||
} from '../../../src/Interface'
|
||||
import { QA_TEMPLATE, REPHRASE_TEMPLATE, RESPONSE_TEMPLATE } from './prompts'
|
||||
|
||||
type RetrievalChainInput = {
|
||||
|
|
@ -165,6 +175,11 @@ class ConversationalRetrievalQAChain_Chains implements INode {
|
|||
const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
|
||||
const responsePrompt = nodeData.inputs?.responsePrompt as string
|
||||
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
const appDataSource = options.appDataSource as DataSource
|
||||
const databaseEntities = options.databaseEntities as IDatabaseEntity
|
||||
const chatflowid = options.chatflowid as string
|
||||
|
||||
let customResponsePrompt = responsePrompt
|
||||
// If the deprecated systemMessagePrompt is still exists
|
||||
|
|
@ -178,7 +193,9 @@ class ConversationalRetrievalQAChain_Chains implements INode {
|
|||
memory = new BufferMemory({
|
||||
returnMessages: true,
|
||||
memoryKey: 'chat_history',
|
||||
inputKey: 'input'
|
||||
appDataSource,
|
||||
databaseEntities,
|
||||
chatflowid
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -194,7 +211,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
|
|||
}
|
||||
const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
|
||||
|
||||
const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []
|
||||
const history = ((await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]) ?? []
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const additionalCallback = await additionalCallbacks(nodeData, options)
|
||||
|
|
@ -367,31 +384,67 @@ const createChain = (
|
|||
return conversationalQAChain
|
||||
}
|
||||
|
||||
interface BufferMemoryExtendedInput {
|
||||
appDataSource: DataSource
|
||||
databaseEntities: IDatabaseEntity
|
||||
chatflowid: string
|
||||
}
|
||||
|
||||
class BufferMemory extends FlowiseMemory implements MemoryMethods {
|
||||
constructor(fields: BufferMemoryInput) {
|
||||
appDataSource: DataSource
|
||||
databaseEntities: IDatabaseEntity
|
||||
chatflowid: string
|
||||
|
||||
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
|
||||
super(fields)
|
||||
this.appDataSource = fields.appDataSource
|
||||
this.databaseEntities = fields.databaseEntities
|
||||
this.chatflowid = fields.chatflowid
|
||||
}
|
||||
|
||||
async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise<IMessage[] | BaseMessage[]> {
|
||||
await this.chatHistory.clear()
|
||||
async getChatMessages(
|
||||
overrideSessionId = '',
|
||||
returnBaseMessages = false,
|
||||
prependMessages?: IMessage[]
|
||||
): Promise<IMessage[] | BaseMessage[]> {
|
||||
if (!overrideSessionId) return []
|
||||
|
||||
for (const msg of prevHistory) {
|
||||
if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
|
||||
else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
|
||||
const chatMessage = await this.appDataSource.getRepository(this.databaseEntities['ChatMessage']).find({
|
||||
where: {
|
||||
sessionId: overrideSessionId,
|
||||
chatflowid: this.chatflowid
|
||||
},
|
||||
order: {
|
||||
createdDate: 'ASC'
|
||||
}
|
||||
})
|
||||
|
||||
if (prependMessages?.length) {
|
||||
chatMessage.unshift(...prependMessages)
|
||||
}
|
||||
|
||||
const memoryResult = await this.loadMemoryVariables({})
|
||||
const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
|
||||
return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
|
||||
if (returnBaseMessages) {
|
||||
return mapChatMessageToBaseMessage(chatMessage)
|
||||
}
|
||||
|
||||
let returnIMessages: IMessage[] = []
|
||||
for (const m of chatMessage) {
|
||||
returnIMessages.push({
|
||||
message: m.content as string,
|
||||
type: m.role
|
||||
})
|
||||
}
|
||||
return returnIMessages
|
||||
}
|
||||
|
||||
async addChatMessages(): Promise<void> {
|
||||
// adding chat messages will be done on the fly in getChatMessages()
|
||||
// adding chat messages is done on server level
|
||||
return
|
||||
}
|
||||
|
||||
async clearChatMessages(): Promise<void> {
|
||||
await this.clear()
|
||||
// clearing chat messages is done on server level
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -110,7 +110,9 @@ class LLMChain_Chains implements INode {
|
|||
})
|
||||
const inputVariables = chain.prompt.inputVariables as string[] // ["product"]
|
||||
promptValues = injectOutputParser(this.outputParser, chain, promptValues)
|
||||
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
|
||||
// Disable streaming because its not final chain
|
||||
const disableStreaming = true
|
||||
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData, disableStreaming)
|
||||
// eslint-disable-next-line no-console
|
||||
console.log('\x1b[92m\x1b[1m\n*****OUTPUT PREDICTION*****\n\x1b[0m\x1b[0m')
|
||||
// eslint-disable-next-line no-console
|
||||
|
|
@ -154,12 +156,13 @@ const runPrediction = async (
|
|||
input: string,
|
||||
promptValuesRaw: ICommonObject | undefined,
|
||||
options: ICommonObject,
|
||||
nodeData: INodeData
|
||||
nodeData: INodeData,
|
||||
disableStreaming?: boolean
|
||||
) => {
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
const isStreaming = options.socketIO && options.socketIOClientId
|
||||
const isStreaming = !disableStreaming && options.socketIO && options.socketIOClientId
|
||||
const socketIO = isStreaming ? options.socketIO : undefined
|
||||
const socketIOClientId = isStreaming ? options.socketIOClientId : ''
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
|
@ -184,7 +187,7 @@ const runPrediction = async (
|
|||
|
||||
if (llmSupportsVision(chain.llm)) {
|
||||
const visionChatModel = chain.llm as IVisionChatModal
|
||||
const messageContent = addImagesToMessages(nodeData, options, visionChatModel.multiModalOption)
|
||||
const messageContent = await addImagesToMessages(nodeData, options, visionChatModel.multiModalOption)
|
||||
if (messageContent?.length) {
|
||||
// Change model to gpt-4-vision && max token to higher when using gpt-4-vision
|
||||
visionChatModel.setVisionModel()
|
||||
|
|
|
|||
|
|
@ -72,7 +72,7 @@ class SqlDatabaseChain_Chains implements INode {
|
|||
label: 'Include Tables',
|
||||
name: 'includesTables',
|
||||
type: 'string',
|
||||
description: 'Tables to include for queries, seperated by comma. Can only use Include Tables or Ignore Tables',
|
||||
description: 'Tables to include for queries, separated by comma. Can only use Include Tables or Ignore Tables',
|
||||
placeholder: 'table1, table2',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
|
|
@ -81,7 +81,7 @@ class SqlDatabaseChain_Chains implements INode {
|
|||
label: 'Ignore Tables',
|
||||
name: 'ignoreTables',
|
||||
type: 'string',
|
||||
description: 'Tables to ignore for queries, seperated by comma. Can only use Ignore Tables or Include Tables',
|
||||
description: 'Tables to ignore for queries, separated by comma. Can only use Ignore Tables or Include Tables',
|
||||
placeholder: 'table1, table2',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
|
|
|
|||
|
|
@ -1,9 +1,10 @@
|
|||
import { BedrockChat } from '@langchain/community/chat_models/bedrock'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
|
||||
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { BedrockChat } from './FlowiseAWSChatBedrock'
|
||||
import { getModels, getRegions, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
|
||||
/**
|
||||
* @author Michael Connor <mlconnor@yahoo.com>
|
||||
|
|
@ -23,7 +24,7 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'AWS ChatBedrock'
|
||||
this.name = 'awsChatBedrock'
|
||||
this.version = 3.0
|
||||
this.version = 5.0
|
||||
this.type = 'AWSChatBedrock'
|
||||
this.icon = 'aws.svg'
|
||||
this.category = 'Chat Models'
|
||||
|
|
@ -46,59 +47,16 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
{
|
||||
label: 'Region',
|
||||
name: 'region',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ label: 'af-south-1', name: 'af-south-1' },
|
||||
{ label: 'ap-east-1', name: 'ap-east-1' },
|
||||
{ label: 'ap-northeast-1', name: 'ap-northeast-1' },
|
||||
{ label: 'ap-northeast-2', name: 'ap-northeast-2' },
|
||||
{ label: 'ap-northeast-3', name: 'ap-northeast-3' },
|
||||
{ label: 'ap-south-1', name: 'ap-south-1' },
|
||||
{ label: 'ap-south-2', name: 'ap-south-2' },
|
||||
{ label: 'ap-southeast-1', name: 'ap-southeast-1' },
|
||||
{ label: 'ap-southeast-2', name: 'ap-southeast-2' },
|
||||
{ label: 'ap-southeast-3', name: 'ap-southeast-3' },
|
||||
{ label: 'ap-southeast-4', name: 'ap-southeast-4' },
|
||||
{ label: 'ap-southeast-5', name: 'ap-southeast-5' },
|
||||
{ label: 'ap-southeast-6', name: 'ap-southeast-6' },
|
||||
{ label: 'ca-central-1', name: 'ca-central-1' },
|
||||
{ label: 'ca-west-1', name: 'ca-west-1' },
|
||||
{ label: 'cn-north-1', name: 'cn-north-1' },
|
||||
{ label: 'cn-northwest-1', name: 'cn-northwest-1' },
|
||||
{ label: 'eu-central-1', name: 'eu-central-1' },
|
||||
{ label: 'eu-central-2', name: 'eu-central-2' },
|
||||
{ label: 'eu-north-1', name: 'eu-north-1' },
|
||||
{ label: 'eu-south-1', name: 'eu-south-1' },
|
||||
{ label: 'eu-south-2', name: 'eu-south-2' },
|
||||
{ label: 'eu-west-1', name: 'eu-west-1' },
|
||||
{ label: 'eu-west-2', name: 'eu-west-2' },
|
||||
{ label: 'eu-west-3', name: 'eu-west-3' },
|
||||
{ label: 'il-central-1', name: 'il-central-1' },
|
||||
{ label: 'me-central-1', name: 'me-central-1' },
|
||||
{ label: 'me-south-1', name: 'me-south-1' },
|
||||
{ label: 'sa-east-1', name: 'sa-east-1' },
|
||||
{ label: 'us-east-1', name: 'us-east-1' },
|
||||
{ label: 'us-east-2', name: 'us-east-2' },
|
||||
{ label: 'us-gov-east-1', name: 'us-gov-east-1' },
|
||||
{ label: 'us-gov-west-1', name: 'us-gov-west-1' },
|
||||
{ label: 'us-west-1', name: 'us-west-1' },
|
||||
{ label: 'us-west-2', name: 'us-west-2' }
|
||||
],
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRegions',
|
||||
default: 'us-east-1'
|
||||
},
|
||||
{
|
||||
label: 'Model Name',
|
||||
name: 'model',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ label: 'anthropic.claude-3-sonnet', name: 'anthropic.claude-3-sonnet-20240229-v1:0' },
|
||||
{ label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
|
||||
{ label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
|
||||
{ label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
|
||||
{ label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' },
|
||||
{ label: 'meta.llama2-13b-chat-v1', name: 'meta.llama2-13b-chat-v1' }
|
||||
],
|
||||
default: 'anthropic.claude-v2'
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listModels',
|
||||
default: 'anthropic.claude-3-haiku'
|
||||
},
|
||||
{
|
||||
label: 'Custom Model Name',
|
||||
|
|
@ -126,10 +84,29 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
optional: true,
|
||||
additionalParams: true,
|
||||
default: 200
|
||||
},
|
||||
{
|
||||
label: 'Allow Image Uploads',
|
||||
name: 'allowImageUploads',
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Only works with claude-3-* models when image is being uploaded from chat. Compatible with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
|
||||
default: false,
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listModels(): Promise<INodeOptionsValue[]> {
|
||||
return await getModels(MODEL_TYPE.CHAT, 'awsChatBedrock')
|
||||
},
|
||||
async listRegions(): Promise<INodeOptionsValue[]> {
|
||||
return await getRegions(MODEL_TYPE.CHAT, 'awsChatBedrock')
|
||||
}
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const iRegion = nodeData.inputs?.region as string
|
||||
const iModel = nodeData.inputs?.model as string
|
||||
|
|
@ -168,7 +145,16 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
}
|
||||
if (cache) obj.cache = cache
|
||||
|
||||
const amazonBedrock = new BedrockChat(obj)
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
|
||||
const multiModalOption: IMultiModalOption = {
|
||||
image: {
|
||||
allowImageUploads: allowImageUploads ?? false
|
||||
}
|
||||
}
|
||||
|
||||
const amazonBedrock = new BedrockChat(nodeData.id, obj)
|
||||
if (obj.model.includes('anthropic.claude-3')) amazonBedrock.setMultiModalOption(multiModalOption)
|
||||
return amazonBedrock
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,34 @@
|
|||
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
|
||||
import { BedrockChat as LCBedrockChat } from '@langchain/community/chat_models/bedrock'
|
||||
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
|
||||
import { IVisionChatModal, IMultiModalOption } from '../../../src'
|
||||
|
||||
export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
|
||||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
id: string
|
||||
|
||||
constructor(id: string, fields: BaseBedrockInput & BaseChatModelParams) {
|
||||
super(fields)
|
||||
this.id = id
|
||||
this.configuredModel = fields?.model || ''
|
||||
this.configuredMaxToken = fields?.maxTokens
|
||||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
super.model = this.configuredModel
|
||||
super.maxTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
setMultiModalOption(multiModalOption: IMultiModalOption): void {
|
||||
this.multiModalOption = multiModalOption
|
||||
}
|
||||
|
||||
setVisionModel(): void {
|
||||
if (!this.model.startsWith('claude-3')) {
|
||||
super.model = 'anthropic.claude-3-haiku-20240307-v1:0'
|
||||
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,8 +1,10 @@
import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

class AzureChatOpenAI_ChatModels implements INode {
    label: string
@@ -19,12 +21,12 @@ class AzureChatOpenAI_ChatModels implements INode {
    constructor() {
        this.label = 'Azure ChatOpenAI'
        this.name = 'azureChatOpenAI'
        this.version = 2.0
        this.version = 4.0
        this.type = 'AzureChatOpenAI'
        this.icon = 'Azure.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint'
        this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
        this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
@@ -41,27 +43,8 @@ class AzureChatOpenAI_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'gpt-4',
                        name: 'gpt-4'
                    },
                    {
                        label: 'gpt-4-32k',
                        name: 'gpt-4-32k'
                    },
                    {
                        label: 'gpt-35-turbo',
                        name: 'gpt-35-turbo'
                    },
                    {
                        label: 'gpt-35-turbo-16k',
                        name: 'gpt-35-turbo-16k'
                    }
                ],
                default: 'gpt-35-turbo',
                optional: true
                type: 'asyncOptions',
                loadMethod: 'listModels'
            },
            {
                label: 'Temperature',
@@ -79,6 +62,14 @@ class AzureChatOpenAI_ChatModels implements INode {
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top Probability',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Frequency Penalty',
                name: 'frequencyPenalty',
@@ -102,10 +93,49 @@ class AzureChatOpenAI_ChatModels implements INode {
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Allow Image Uploads',
                name: 'allowImageUploads',
                type: 'boolean',
                description:
                    'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
                default: false,
                optional: true
            },
            {
                label: 'Image Resolution',
                description: 'This parameter controls the resolution in which the model views the image.',
                name: 'imageResolution',
                type: 'options',
                options: [
                    {
                        label: 'Low',
                        name: 'low'
                    },
                    {
                        label: 'High',
                        name: 'high'
                    },
                    {
                        label: 'Auto',
                        name: 'auto'
                    }
                ],
                default: 'low',
                optional: false,
                additionalParams: true
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'azureChatOpenAI')
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const modelName = nodeData.inputs?.modelName as string
        const temperature = nodeData.inputs?.temperature as string
@@ -115,6 +145,7 @@ class AzureChatOpenAI_ChatModels implements INode {
        const timeout = nodeData.inputs?.timeout as string
        const streaming = nodeData.inputs?.streaming as boolean
        const cache = nodeData.inputs?.cache as BaseCache
        const topP = nodeData.inputs?.topP as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@@ -122,6 +153,9 @@ class AzureChatOpenAI_ChatModels implements INode {
        const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
        const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
        const imageResolution = nodeData.inputs?.imageResolution as string

        const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
            temperature: parseFloat(temperature),
            modelName,
@@ -137,8 +171,17 @@ class AzureChatOpenAI_ChatModels implements INode {
        if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
        if (timeout) obj.timeout = parseInt(timeout, 10)
        if (cache) obj.cache = cache
        if (topP) obj.topP = parseFloat(topP)

        const model = new ChatOpenAI(obj)
        const multiModalOption: IMultiModalOption = {
            image: {
                allowImageUploads: allowImageUploads ?? false,
                imageResolution
            }
        }

        const model = new ChatOpenAI(nodeData.id, obj)
        model.setMultiModalOption(multiModalOption)
        return model
    }
}
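A recurring change in this diff: hard-coded `options` arrays on model inputs give way to `type: 'asyncOptions'` with `loadMethod: 'listModels'`, so the UI resolves the model list at load time from `getModels` in `src/modelLoader`. The loader's implementation is not part of this diff; a minimal sketch of the shape it must return (the in-memory `catalog` here is an assumption for illustration, not the real data source):

import { INodeOptionsValue } from '../../../src/Interface'

// Hypothetical stand-in for the modelLoader: look a node name up in a
// catalog and map each entry to the { label, name } shape the UI expects.
const catalog: Record<string, { label: string; name: string }[]> = {
    azureChatOpenAI: [
        { label: 'gpt-4', name: 'gpt-4' },
        { label: 'gpt-35-turbo', name: 'gpt-35-turbo' }
    ]
}

async function getModelsSketch(nodeName: string): Promise<INodeOptionsValue[]> {
    return catalog[nodeName] ?? []
}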
@@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
import { OpenAI } from 'llamaindex'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

interface AzureOpenAIConfig {
    apiKey?: string
@@ -9,6 +10,28 @@ interface AzureOpenAIConfig {
    deploymentName?: string
}

const ALL_AZURE_OPENAI_CHAT_MODELS = {
    'gpt-35-turbo': { contextWindow: 4096, openAIModel: 'gpt-3.5-turbo' },
    'gpt-35-turbo-16k': {
        contextWindow: 16384,
        openAIModel: 'gpt-3.5-turbo-16k'
    },
    'gpt-4': { contextWindow: 8192, openAIModel: 'gpt-4' },
    'gpt-4-32k': { contextWindow: 32768, openAIModel: 'gpt-4-32k' },
    'gpt-4-turbo': {
        contextWindow: 128000,
        openAIModel: 'gpt-4-turbo'
    },
    'gpt-4-vision-preview': {
        contextWindow: 128000,
        openAIModel: 'gpt-4-vision-preview'
    },
    'gpt-4-1106-preview': {
        contextWindow: 128000,
        openAIModel: 'gpt-4-1106-preview'
    }
}

class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
    label: string
    name: string
@@ -25,7 +48,7 @@ class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
    constructor() {
        this.label = 'AzureChatOpenAI'
        this.name = 'azureChatOpenAI_LlamaIndex'
        this.version = 1.0
        this.version = 2.0
        this.type = 'AzureChatOpenAI'
        this.icon = 'Azure.svg'
        this.category = 'Chat Models'
@@ -42,27 +65,9 @@ class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'gpt-4',
                        name: 'gpt-4'
                    },
                    {
                        label: 'gpt-4-32k',
                        name: 'gpt-4-32k'
                    },
                    {
                        label: 'gpt-3.5-turbo',
                        name: 'gpt-3.5-turbo'
                    },
                    {
                        label: 'gpt-3.5-turbo-16k',
                        name: 'gpt-3.5-turbo-16k'
                    }
                ],
                default: 'gpt-3.5-turbo-16k',
                optional: true
                type: 'asyncOptions',
                loadMethod: 'listModels',
                default: 'gpt-3.5-turbo-16k'
            },
            {
                label: 'Temperature',
@@ -99,8 +104,15 @@ class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
        ]
    }

    //@ts-ignore
    loadMethods = {
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'azureChatOpenAI_LlamaIndex')
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
        const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AZURE_OPENAI_CHAT_MODELS
        const temperature = nodeData.inputs?.temperature as string
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
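The new `ALL_AZURE_OPENAI_CHAT_MODELS` table keys Azure-style deployment names (note `gpt-35-turbo`, not `gpt-3.5-turbo`) to the equivalent OpenAI model and its context window. A small sketch of how the mapping can be consumed; the 4096 fallback is an assumption for illustration, not taken from the diff:

// Resolve the token budget and canonical model name from the Azure alias.
function resolveAzureModel(modelName: keyof typeof ALL_AZURE_OPENAI_CHAT_MODELS) {
    const entry = ALL_AZURE_OPENAI_CHAT_MODELS[modelName]
    return {
        openAIModel: entry?.openAIModel ?? modelName,
        contextWindow: entry?.contextWindow ?? 4096 // assumed fallback
    }
}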
@@ -1,9 +1,10 @@
import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatAnthropic } from './FlowiseChatAntrhopic'
import { ChatAnthropic } from './FlowiseChatAnthropic'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

class ChatAnthropic_ChatModels implements INode {
    label: string
@@ -20,7 +21,7 @@ class ChatAnthropic_ChatModels implements INode {
    constructor() {
        this.label = 'ChatAnthropic'
        this.name = 'chatAnthropic'
        this.version = 4.0
        this.version = 6.0
        this.type = 'ChatAnthropic'
        this.icon = 'Anthropic.svg'
        this.category = 'Chat Models'
@@ -42,80 +43,9 @@ class ChatAnthropic_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'claude-3-opus',
                        name: 'claude-3-opus-20240229',
                        description: 'Most powerful model for highly complex tasks'
                    },
                    {
                        label: 'claude-3-sonnet',
                        name: 'claude-3-sonnet-20240229',
                        description: 'Ideal balance of intelligence and speed for enterprise workloads'
                    },
                    {
                        label: 'claude-2',
                        name: 'claude-2',
                        description: 'Claude 2 latest major version, automatically get updates to the model as they are released'
                    },
                    {
                        label: 'claude-2.1',
                        name: 'claude-2.1',
                        description: 'Claude 2 latest full version'
                    },
                    {
                        label: 'claude-instant-1',
                        name: 'claude-instant-1',
                        description: 'Claude Instant latest major version, automatically get updates to the model as they are released'
                    },
                    {
                        label: 'claude-v1',
                        name: 'claude-v1'
                    },
                    {
                        label: 'claude-v1-100k',
                        name: 'claude-v1-100k'
                    },
                    {
                        label: 'claude-v1.0',
                        name: 'claude-v1.0'
                    },
                    {
                        label: 'claude-v1.2',
                        name: 'claude-v1.2'
                    },
                    {
                        label: 'claude-v1.3',
                        name: 'claude-v1.3'
                    },
                    {
                        label: 'claude-v1.3-100k',
                        name: 'claude-v1.3-100k'
                    },
                    {
                        label: 'claude-instant-v1',
                        name: 'claude-instant-v1'
                    },
                    {
                        label: 'claude-instant-v1-100k',
                        name: 'claude-instant-v1-100k'
                    },
                    {
                        label: 'claude-instant-v1.0',
                        name: 'claude-instant-v1.0'
                    },
                    {
                        label: 'claude-instant-v1.1',
                        name: 'claude-instant-v1.1'
                    },
                    {
                        label: 'claude-instant-v1.1-100k',
                        name: 'claude-instant-v1.1-100k'
                    }
                ],
                default: 'claude-2',
                optional: true
                type: 'asyncOptions',
                loadMethod: 'listModels',
                default: 'claude-3-haiku'
            },
            {
                label: 'Temperature',
@@ -161,10 +91,17 @@ class ChatAnthropic_ChatModels implements INode {
        ]
    }

    //@ts-ignore
    loadMethods = {
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'chatAnthropic')
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as string
        const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
        const maxTokens = nodeData.inputs?.maxTokensToSample as string
        const topP = nodeData.inputs?.topP as string
        const topK = nodeData.inputs?.topK as string
        const streaming = nodeData.inputs?.streaming as boolean
@@ -182,7 +119,7 @@ class ChatAnthropic_ChatModels implements INode {
            streaming: streaming ?? true
        }

        if (maxTokensToSample) obj.maxTokensToSample = parseInt(maxTokensToSample, 10)
        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (topK) obj.topK = parseFloat(topK)
        if (cache) obj.cache = cache
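Two things change in this node's `init`: the legacy `maxTokensToSample` input now feeds the SDK's renamed `maxTokens` field, and, as everywhere in these nodes, numeric inputs arrive from the UI as strings and are parsed only when actually set. A condensed, self-contained sketch of that coercion pattern (the field names follow the diff; the helper itself is an illustration):

// Sketch: coerce optional string inputs from the UI into numbers,
// skipping anything the user left blank.
function coerceAnthropicInputs(inputs: Record<string, string | undefined>) {
    const out: { maxTokens?: number; topP?: number; topK?: number } = {}
    if (inputs.maxTokensToSample) out.maxTokens = parseInt(inputs.maxTokensToSample, 10)
    if (inputs.topP) out.topP = parseFloat(inputs.topP)
    if (inputs.topK) out.topK = parseFloat(inputs.topK)
    return out
}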
@@ -1,4 +1,5 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Anthropic } from 'llamaindex'

@@ -18,7 +19,7 @@ class ChatAnthropic_LlamaIndex_ChatModels implements INode {
    constructor() {
        this.label = 'ChatAnthropic'
        this.name = 'chatAnthropic_LlamaIndex'
        this.version = 1.0
        this.version = 3.0
        this.type = 'ChatAnthropic'
        this.icon = 'Anthropic.svg'
        this.category = 'Chat Models'
@@ -35,31 +36,9 @@ class ChatAnthropic_LlamaIndex_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'claude-3-opus',
                        name: 'claude-3-opus-20240229',
                        description: 'Most powerful model for highly complex tasks'
                    },
                    {
                        label: 'claude-3-sonnet',
                        name: 'claude-3-sonnet-20240229',
                        description: 'Ideal balance of intelligence and speed for enterprise workloads'
                    },
                    {
                        label: 'claude-2',
                        name: 'claude-2',
                        description: 'Claude 2 latest major version, automatically get updates to the model as they are released'
                    },
                    {
                        label: 'claude-instant-1',
                        name: 'claude-instant-1',
                        description: 'Claude Instant latest major version, automatically get updates to the model as they are released'
                    }
                ],
                default: 'claude-2',
                optional: true
                type: 'asyncOptions',
                loadMethod: 'listModels',
                default: 'claude-3-haiku'
            },
            {
                label: 'Temperature',
@@ -88,9 +67,16 @@ class ChatAnthropic_LlamaIndex_ChatModels implements INode {
        ]
    }

    //@ts-ignore
    loadMethods = {
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'chatAnthropic_LlamaIndex')
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as 'claude-2' | 'claude-instant-1' | undefined
        const modelName = nodeData.inputs?.modelName as 'claude-3-opus' | 'claude-3-sonnet' | 'claude-2.1' | 'claude-instant-1.2'
        const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
        const topP = nodeData.inputs?.topP as string
@@ -1,6 +1,6 @@
import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { IVisionChatModal, IMultiModalOption } from '../../../src'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { IVisionChatModal, IMultiModalOption } from '../../../src'

export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal {
    configuredModel: string
@@ -11,8 +11,8 @@ export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChat
    constructor(id: string, fields: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string }) {
        super(fields)
        this.id = id
        this.configuredModel = fields?.modelName || 'claude-3-opus-20240229'
        this.configuredMaxToken = fields?.maxTokens ?? 256
        this.configuredModel = fields?.modelName || ''
        this.configuredMaxToken = fields?.maxTokens ?? 2048
    }

    revertToOriginalModel(): void {
@@ -26,8 +26,8 @@ export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChat

    setVisionModel(): void {
        if (!this.modelName.startsWith('claude-3')) {
            super.modelName = 'claude-3-opus-20240229'
            super.maxTokens = 1024
            super.modelName = 'claude-3-haiku-20240307'
            super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 2048
        }
    }
}
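The vision fallback changes from the expensive `claude-3-opus-20240229` to `claude-3-haiku-20240307`, and the token cap now respects the user's configured value instead of a hard-coded 1024. The rule is easy to reason about as a pure function; a sketch restating the method's logic, not the class method itself:

// Sketch of the fallback rule encoded by setVisionModel().
function visionFallback(modelName: string, configuredMaxToken?: number) {
    if (modelName.startsWith('claude-3')) {
        return { modelName, maxTokens: configuredMaxToken } // already vision-capable
    }
    return {
        modelName: 'claude-3-haiku-20240307',
        maxTokens: configuredMaxToken ? configuredMaxToken : 2048
    }
}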
@@ -0,0 +1,80 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatBaiduWenxin } from '@langchain/community/chat_models/baiduwenxin'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

class ChatBaiduWenxin_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatBaiduWenxin'
        this.name = 'chatBaiduWenxin'
        this.version = 1.0
        this.type = 'ChatBaiduWenxin'
        this.icon = 'baiduwenxin.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around BaiduWenxin Chat Endpoints'
        this.baseClasses = [this.type, ...getBaseClasses(ChatBaiduWenxin)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['baiduApi']
        }
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Model',
                name: 'modelName',
                type: 'string',
                placeholder: 'ERNIE-Bot-turbo'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const cache = nodeData.inputs?.cache as BaseCache
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const baiduApiKey = getCredentialParam('baiduApiKey', credentialData, nodeData)
        const baiduSecretKey = getCredentialParam('baiduSecretKey', credentialData, nodeData)

        const obj: Partial<ChatBaiduWenxin> = {
            streaming: true,
            baiduApiKey,
            baiduSecretKey,
            modelName,
            temperature: temperature ? parseFloat(temperature) : undefined
        }
        if (cache) obj.cache = cache

        const model = new ChatBaiduWenxin(obj)
        return model
    }
}

module.exports = { nodeClass: ChatBaiduWenxin_ChatModels }
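As with the other chat-model nodes, `init` resolves the credential pair and hands a partial config to the LangChain class. A standalone sketch mirroring what the node assembles; the key values are placeholders, not real credentials:

import { ChatBaiduWenxin } from '@langchain/community/chat_models/baiduwenxin'

// Standalone equivalent of what the node's init() builds.
const wenxin = new ChatBaiduWenxin({
    streaming: true,
    baiduApiKey: 'YOUR_BAIDU_API_KEY', // placeholder
    baiduSecretKey: 'YOUR_BAIDU_SECRET_KEY', // placeholder
    modelName: 'ERNIE-Bot-turbo',
    temperature: 0.9
})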
@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg xmlns="http://www.w3.org/2000/svg"
aria-label="Baidu" role="img"
viewBox="0 0 512 512"><rect
width="512" height="512"
rx="15%"
fill="#ffffff"/><path d="m131 251c41-9 35-58 34-68-2-17-21-45-48-43-33 3-37 50-37 50-5 22 10 70 51 61m76-82c22 0 40-26 40-58s-18-58-40-58c-23 0-41 26-41 58s18 58 41 58m96 4c31 4 50-28 54-53 4-24-16-52-37-57s-48 29-50 52c-3 27 3 54 33 58m120 41c0-12-10-47-46-47s-41 33-41 57c0 22 2 53 47 52s40-51 40-62m-46 102s-46-36-74-75c-36-57-89-34-106-5-18 29-45 48-49 53-4 4-56 33-44 84 11 52 52 51 52 51s30 3 65-5 65 2 65 2 81 27 104-25c22-53-13-80-13-80" fill="#2319dc"/><path d="m214 266v34h-28s-29 3-39 35c-3 21 4 34 5 36 1 3 10 19 33 23h53v-128zm-1 107h-21s-15-1-19-18c-3-7 0-16 1-20 1-3 6-11 17-14h22zm38-70v68s1 17 24 23h61v-91h-26v68h-25s-8-1-10-7v-61z" fill="#ffffff"/></svg>
@@ -0,0 +1,87 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatCohere, ChatCohereInput } from '@langchain/cohere'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'

class ChatCohere_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatCohere'
        this.name = 'chatCohere'
        this.version = 1.0
        this.type = 'ChatCohere'
        this.icon = 'Cohere.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around Cohere Chat Endpoints'
        this.baseClasses = [this.type, ...getBaseClasses(ChatCohere)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['cohereApi']
        }
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'asyncOptions',
                loadMethod: 'listModels',
                default: 'command-r'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.7,
                optional: true
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'chatCohere')
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const modelName = nodeData.inputs?.modelName as string
        const cache = nodeData.inputs?.cache as BaseCache
        const temperature = nodeData.inputs?.temperature as string
        const streaming = nodeData.inputs?.streaming as boolean
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData)

        const obj: ChatCohereInput = {
            model: modelName,
            apiKey: cohereApiKey,
            temperature: temperature ? parseFloat(temperature) : undefined,
            streaming: streaming ?? true
        }
        if (cache) obj.cache = cache

        const model = new ChatCohere(obj)
        return model
    }
}

module.exports = { nodeClass: ChatCohere_ChatModels }
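`ChatCohereInput` takes the Cohere key as plain `apiKey` rather than a provider-prefixed field, and streaming defaults to on unless the input disables it. A standalone sketch mirroring what `init` assembles; the key is a placeholder:

import { ChatCohere } from '@langchain/cohere'

// Mirrors the node's init(): command-r with a mild temperature,
// streaming enabled. The API key is a placeholder.
const cohere = new ChatCohere({
    model: 'command-r',
    apiKey: 'YOUR_COHERE_API_KEY',
    temperature: 0.7,
    streaming: true
})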
@@ -0,0 +1 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M11.776 18.304c.64 0 1.92-.032 3.712-.768 2.08-.864 6.176-2.4 9.152-4 2.08-1.12 2.976-2.592 2.976-4.576 0-2.72-2.208-4.96-4.96-4.96h-11.52A7.143 7.143 0 0 0 4 11.136c0 3.936 3.008 7.168 7.776 7.168Z" fill="#39594D"/><path fill-rule="evenodd" clip-rule="evenodd" d="M13.728 23.2c0-1.92 1.152-3.68 2.944-4.416l3.616-1.504C23.968 15.776 28 18.464 28 22.432A5.572 5.572 0 0 1 22.432 28h-3.936c-2.624 0-4.768-2.144-4.768-4.8Z" fill="#D18EE2"/><path d="M8.128 19.232A4.138 4.138 0 0 0 4 23.36v.544C4 26.144 5.856 28 8.128 28a4.138 4.138 0 0 0 4.128-4.128v-.544c-.032-2.24-1.856-4.096-4.128-4.096Z" fill="#FF7759"/></svg>
@@ -0,0 +1,79 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatFireworks } from '@langchain/community/chat_models/fireworks'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

class ChatFireworks_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatFireworks'
        this.name = 'chatFireworks'
        this.version = 1.0
        this.type = 'ChatFireworks'
        this.icon = 'Fireworks.png'
        this.category = 'Chat Models'
        this.description = 'Wrapper around Fireworks Chat Endpoints'
        this.baseClasses = [this.type, ...getBaseClasses(ChatFireworks)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['fireworksApi']
        }
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Model',
                name: 'modelName',
                type: 'string',
                default: 'accounts/fireworks/models/llama-v2-13b-chat',
                placeholder: 'accounts/fireworks/models/llama-v2-13b-chat'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const cache = nodeData.inputs?.cache as BaseCache
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const fireworksApiKey = getCredentialParam('fireworksApiKey', credentialData, nodeData)

        const obj: Partial<ChatFireworks> = {
            fireworksApiKey,
            model: modelName,
            modelName,
            temperature: temperature ? parseFloat(temperature) : undefined
        }
        if (cache) obj.cache = cache

        const model = new ChatFireworks(obj)
        return model
    }
}

module.exports = { nodeClass: ChatFireworks_ChatModels }
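Note that `init` sets both `model` and `modelName` to the same value; this appears to be a hedge against the field name differing across `@langchain/community` versions (an inference, not stated in the diff). The resulting call reduces to:

import { ChatFireworks } from '@langchain/community/chat_models/fireworks'

// Condensed form of the node's init(); the key is a placeholder.
const fireworks = new ChatFireworks({
    fireworksApiKey: 'YOUR_FIREWORKS_API_KEY',
    model: 'accounts/fireworks/models/llama-v2-13b-chat',
    temperature: 0.9
})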
[Binary image added: 6.6 KiB]
@@ -1,9 +1,10 @@
import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai'
import type { SafetySetting } from '@google/generative-ai'
import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from '@langchain/google-genai'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from './FlowiseChatGoogleGenerativeAI'

class GoogleGenerativeAI_ChatModels implements INode {
    label: string
@@ -20,7 +21,7 @@ class GoogleGenerativeAI_ChatModels implements INode {
    constructor() {
        this.label = 'ChatGoogleGenerativeAI'
        this.name = 'chatGoogleGenerativeAI'
        this.version = 1.0
        this.version = 2.0
        this.type = 'ChatGoogleGenerativeAI'
        this.icon = 'GoogleGemini.svg'
        this.category = 'Chat Models'
@@ -44,13 +45,8 @@ class GoogleGenerativeAI_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'gemini-pro',
                        name: 'gemini-pro'
                    }
                ],
                type: 'asyncOptions',
                loadMethod: 'listModels',
                default: 'gemini-pro'
            },
            {
@@ -143,10 +139,26 @@ class GoogleGenerativeAI_ChatModels implements INode {
                ],
                optional: true,
                additionalParams: true
            },
            {
                label: 'Allow Image Uploads',
                name: 'allowImageUploads',
                type: 'boolean',
                description:
                    'Automatically uses vision model when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
                default: false,
                optional: true
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'chatGoogleGenerativeAI')
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('googleGenerativeAPIKey', credentialData, nodeData)
@@ -159,20 +171,21 @@ class GoogleGenerativeAI_ChatModels implements INode {
        const harmCategory = nodeData.inputs?.harmCategory as string
        const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string
        const cache = nodeData.inputs?.cache as BaseCache
        const streaming = nodeData.inputs?.streaming as boolean

        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean

        const obj: Partial<GoogleGenerativeAIChatInput> = {
            apiKey: apiKey,
            modelName: modelName,
            maxOutputTokens: 2048
            streaming: streaming ?? true
        }

        if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)

        const model = new ChatGoogleGenerativeAI(obj)
        if (topP) model.topP = parseFloat(topP)
        if (topK) model.topK = parseFloat(topK)
        if (cache) model.cache = cache
        if (temperature) model.temperature = parseFloat(temperature)
        if (topP) obj.topP = parseFloat(topP)
        if (topK) obj.topK = parseFloat(topK)
        if (cache) obj.cache = cache
        if (temperature) obj.temperature = parseFloat(temperature)

        // Safety Settings
        let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory)
@@ -185,7 +198,16 @@ class GoogleGenerativeAI_ChatModels implements INode {
                threshold: harmBlockThresholds[index] as HarmBlockThreshold
            }
        })
        if (safetySettings.length > 0) model.safetySettings = safetySettings
        if (safetySettings.length > 0) obj.safetySettings = safetySettings

        const multiModalOption: IMultiModalOption = {
            image: {
                allowImageUploads: allowImageUploads ?? false
            }
        }

        const model = new ChatGoogleGenerativeAI(nodeData.id, obj)
        model.setMultiModalOption(multiModalOption)

        return model
    }
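The safety settings arrive as two parallel multi-option inputs, one of categories and one of thresholds, which `init` zips index-by-index into `SafetySetting[]` before constructing the model. A sketch of that pairing, assuming (as the node does) that the two arrays line up:

import { HarmBlockThreshold, HarmCategory, SafetySetting } from '@google/generative-ai'

// Pair category i with threshold i; a length mismatch is a user error
// that the real node surfaces separately.
function zipSafetySettings(categories: string[], thresholds: string[]): SafetySetting[] {
    return categories.map((category, index) => ({
        category: category as HarmCategory,
        threshold: thresholds[index] as HarmBlockThreshold
    }))
}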
@@ -0,0 +1,581 @@
import { BaseMessage, AIMessage, AIMessageChunk, isBaseMessage, ChatMessage, MessageContent } from '@langchain/core/messages'
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
import { BaseChatModel, type BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { ChatGeneration, ChatGenerationChunk, ChatResult } from '@langchain/core/outputs'
import { ToolCall } from '@langchain/core/messages/tool'
import { NewTokenIndices } from '@langchain/core/callbacks/base'
import {
    EnhancedGenerateContentResponse,
    Content,
    Part,
    Tool,
    GenerativeModel,
    GoogleGenerativeAI as GenerativeAI
} from '@google/generative-ai'
import type { SafetySetting } from '@google/generative-ai'
import { ICommonObject, IMultiModalOption, IVisionChatModal } from '../../../src'
import { StructuredToolInterface } from '@langchain/core/tools'
import { isStructuredTool } from '@langchain/core/utils/function_calling'
import { zodToJsonSchema } from 'zod-to-json-schema'

interface TokenUsage {
    completionTokens?: number
    promptTokens?: number
    totalTokens?: number
}

export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
    modelName?: string
    model?: string
    temperature?: number
    maxOutputTokens?: number
    topP?: number
    topK?: number
    stopSequences?: string[]
    safetySettings?: SafetySetting[]
    apiKey?: string
    streaming?: boolean
}

class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGenerativeAIChatInput {
    modelName = 'gemini-pro'

    temperature?: number

    maxOutputTokens?: number

    topP?: number

    topK?: number

    stopSequences: string[] = []

    safetySettings?: SafetySetting[]

    apiKey?: string

    streaming = false

    private client: GenerativeModel

    get _isMultimodalModel() {
        return this.modelName.includes('vision') || this.modelName.startsWith('gemini-1.5')
    }

    constructor(fields?: GoogleGenerativeAIChatInput) {
        super(fields ?? {})

        this.modelName = fields?.model?.replace(/^models\//, '') ?? fields?.modelName?.replace(/^models\//, '') ?? 'gemini-pro'

        this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens

        if (this.maxOutputTokens && this.maxOutputTokens < 0) {
            throw new Error('`maxOutputTokens` must be a positive integer')
        }

        this.temperature = fields?.temperature ?? this.temperature
        if (this.temperature && (this.temperature < 0 || this.temperature > 1)) {
            throw new Error('`temperature` must be in the range of [0.0,1.0]')
        }

        this.topP = fields?.topP ?? this.topP
        if (this.topP && this.topP < 0) {
            throw new Error('`topP` must be a positive integer')
        }

        if (this.topP && this.topP > 1) {
            throw new Error('`topP` must be below 1.')
        }

        this.topK = fields?.topK ?? this.topK
        if (this.topK && this.topK < 0) {
            throw new Error('`topK` must be a positive integer')
        }

        this.stopSequences = fields?.stopSequences ?? this.stopSequences

        this.apiKey = fields?.apiKey ?? process.env['GOOGLE_API_KEY']
        if (!this.apiKey) {
            throw new Error(
                'Please set an API key for Google GenerativeAI ' +
                    'in the environment variable GOOGLE_API_KEY ' +
                    'or in the `apiKey` field of the ' +
                    'ChatGoogleGenerativeAI constructor'
            )
        }

        this.safetySettings = fields?.safetySettings ?? this.safetySettings
        if (this.safetySettings && this.safetySettings.length > 0) {
            const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category))
            if (safetySettingsSet.size !== this.safetySettings.length) {
                throw new Error('The categories in `safetySettings` array must be unique')
            }
        }

        this.streaming = fields?.streaming ?? this.streaming

        this.getClient()
    }
    getClient(tools?: Tool[]) {
        this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel({
            model: this.modelName,
            tools,
            safetySettings: this.safetySettings as SafetySetting[],
            generationConfig: {
                candidateCount: 1,
                stopSequences: this.stopSequences,
                maxOutputTokens: this.maxOutputTokens,
                temperature: this.temperature,
                topP: this.topP,
                topK: this.topK
            }
        })
    }

    _combineLLMOutput() {
        return []
    }

    _llmType() {
        return 'googlegenerativeai'
    }

    override bindTools(tools: (StructuredToolInterface | Record<string, unknown>)[], kwargs?: Partial<ICommonObject>) {
        //@ts-ignore
        return this.bind({ tools: convertToGeminiTools(tools), ...kwargs })
    }

    convertFunctionResponse(prompts: Content[]) {
        for (let i = 0; i < prompts.length; i += 1) {
            if (prompts[i].role === 'function') {
                if (prompts[i - 1].role === 'model') {
                    const toolName = prompts[i - 1].parts[0].functionCall?.name ?? ''
                    prompts[i].parts = [
                        {
                            functionResponse: {
                                name: toolName,
                                response: {
                                    name: toolName,
                                    content: prompts[i].parts[0].text
                                }
                            }
                        }
                    ]
                }
            }
        }
    }

    async _generateNonStreaming(
        prompt: Content[],
        options: this['ParsedCallOptions'],
        _runManager?: CallbackManagerForLLMRun
    ): Promise<ChatResult> {
        //@ts-ignore
        const tools = options.tools ?? []

        this.convertFunctionResponse(prompt)

        if (tools.length > 0) {
            this.getClient(tools)
        } else {
            this.getClient()
        }
        const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
            let output
            try {
                output = await this.client.generateContent({
                    contents: prompt
                })
            } catch (e: any) {
                if (e.message?.includes('400 Bad Request')) {
                    e.status = 400
                }
                throw e
            }
            return output
        })
        const generationResult = mapGenerateContentResultToChatResult(res.response)
        await _runManager?.handleLLMNewToken(generationResult.generations?.length ? generationResult.generations[0].text : '')
        return generationResult
    }

    async _generate(
        messages: BaseMessage[],
        options: this['ParsedCallOptions'],
        runManager?: CallbackManagerForLLMRun
    ): Promise<ChatResult> {
        let prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
        prompt = checkIfEmptyContentAndSameRole(prompt)

        // Handle streaming
        if (this.streaming) {
            const tokenUsage: TokenUsage = {}
            const stream = this._streamResponseChunks(messages, options, runManager)
            const finalChunks: Record<number, ChatGenerationChunk> = {}
            for await (const chunk of stream) {
                const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0
                if (finalChunks[index] === undefined) {
                    finalChunks[index] = chunk
                } else {
                    finalChunks[index] = finalChunks[index].concat(chunk)
                }
            }
            const generations = Object.entries(finalChunks)
                .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
                .map(([_, value]) => value)

            return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }
        }
        return this._generateNonStreaming(prompt, options, runManager)
    }
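When streaming, `_generate` drains `_streamResponseChunks` and folds the chunks back into whole generations, keyed by the completion index carried in `generationInfo`. The fold itself is just `ChatGenerationChunk.concat`; a reduced sketch of the same aggregation in isolation:

import { ChatGenerationChunk } from '@langchain/core/outputs'

// Merge streamed chunks into one generation per completion index,
// mirroring the loop in _generate above.
function foldChunks(chunks: { index: number; chunk: ChatGenerationChunk }[]): ChatGenerationChunk[] {
    const byIndex: Record<number, ChatGenerationChunk> = {}
    for (const { index, chunk } of chunks) {
        byIndex[index] = byIndex[index] === undefined ? chunk : byIndex[index].concat(chunk)
    }
    return Object.entries(byIndex)
        .sort(([a], [b]) => parseInt(a, 10) - parseInt(b, 10))
        .map(([, value]) => value)
}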
    async *_streamResponseChunks(
        messages: BaseMessage[],
        options: this['ParsedCallOptions'],
        runManager?: CallbackManagerForLLMRun
    ): AsyncGenerator<ChatGenerationChunk> {
        let prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
        prompt = checkIfEmptyContentAndSameRole(prompt)

        //@ts-ignore
        if (options.tools !== undefined && options.tools.length > 0) {
            const result = await this._generateNonStreaming(prompt, options, runManager)
            const generationMessage = result.generations[0].message as AIMessage
            if (generationMessage === undefined) {
                throw new Error('Could not parse ChatGoogleGenerativeAI output.')
            }
            const toolCallChunks = generationMessage.tool_calls?.map((toolCall, i) => ({
                name: toolCall.name,
                args: JSON.stringify(toolCall.args),
                id: toolCall.id,
                index: i
            }))
            yield new ChatGenerationChunk({
                message: new AIMessageChunk({
                    content: generationMessage.content,
                    additional_kwargs: generationMessage.additional_kwargs,
                    tool_call_chunks: toolCallChunks
                }),
                text: generationMessage.tool_calls?.length ? '' : (generationMessage.content as string)
            })
        } else {
            const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
                this.getClient()
                const { stream } = await this.client.generateContentStream({
                    contents: prompt
                })
                return stream
            })

            for await (const response of stream) {
                const chunk = convertResponseContentToChatGenerationChunk(response)
                if (!chunk) {
                    continue
                }

                yield chunk
                await runManager?.handleLLMNewToken(chunk.text ?? '')
            }
        }
    }
}

export class ChatGoogleGenerativeAI extends LangchainChatGoogleGenerativeAI implements IVisionChatModal {
    configuredModel: string
    configuredMaxToken?: number
    multiModalOption: IMultiModalOption
    id: string

    constructor(id: string, fields?: GoogleGenerativeAIChatInput) {
        super(fields)
        this.id = id
        this.configuredModel = fields?.modelName ?? ''
        this.configuredMaxToken = fields?.maxOutputTokens
    }

    revertToOriginalModel(): void {
        super.modelName = this.configuredModel
        super.maxOutputTokens = this.configuredMaxToken
    }

    setMultiModalOption(multiModalOption: IMultiModalOption): void {
        this.multiModalOption = multiModalOption
    }

    setVisionModel(): void {
        if (this.modelName !== 'gemini-pro-vision' && this.modelName !== 'gemini-1.5-pro-latest') {
            super.modelName = 'gemini-1.5-pro-latest'
            super.maxOutputTokens = this.configuredMaxToken ? this.configuredMaxToken : 8192
        }
    }
}
function getMessageAuthor(message: BaseMessage) {
    const type = message._getType()
    if (ChatMessage.isInstance(message)) {
        return message.role
    }
    return message.name ?? type
}

function convertAuthorToRole(author: string) {
    switch (author) {
        /**
         * Note: Gemini currently is not supporting system messages
         * we will convert them to human messages and merge with following
         * */
        case 'ai':
        case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
            return 'model'
        case 'system':
        case 'human':
            return 'user'
        case 'function':
        case 'tool':
            return 'function'
        default:
            // Instead of throwing, we return model
            // throw new Error(`Unknown / unsupported author: ${author}`)
            return 'model'
    }
}

function convertMessageContentToParts(content: MessageContent, isMultimodalModel: boolean): Part[] {
    if (typeof content === 'string') {
        return [{ text: content }]
    }

    return content.map((c) => {
        if (c.type === 'text') {
            return {
                text: c.text
            }
        }

        if (c.type === 'tool_use') {
            return {
                functionCall: c.functionCall
            }
        }

        /*if (c.type === "tool_use" || c.type === "tool_result") {
            // TODO: Fix when SDK types are fixed
            return {
                ...contentPart,
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
            } as any;
        }*/

        if (c.type === 'image_url') {
            if (!isMultimodalModel) {
                throw new Error(`This model does not support images`)
            }
            let source
            if (typeof c.image_url === 'string') {
                source = c.image_url
            } else if (typeof c.image_url === 'object' && 'url' in c.image_url) {
                source = c.image_url.url
            } else {
                throw new Error('Please provide image as base64 encoded data URL')
            }
            const [dm, data] = source.split(',')
            if (!dm.startsWith('data:')) {
                throw new Error('Please provide image as base64 encoded data URL')
            }

            const [mimeType, encoding] = dm.replace(/^data:/, '').split(';')
            if (encoding !== 'base64') {
                throw new Error('Please provide image as base64 encoded data URL')
            }

            return {
                inlineData: {
                    data,
                    mimeType
                }
            }
        }
        throw new Error(`Unknown content type ${(c as { type: string }).type}`)
    })
}

/*
 * This is a dedicated logic for Multi Agent Supervisor to handle the case where the content is empty, and the role is the same
 */
function checkIfEmptyContentAndSameRole(contents: Content[]) {
    let prevRole = ''
    const removedContents: Content[] = []
    for (const content of contents) {
        const role = content.role
        if (content.parts.length && content.parts[0].text === '' && role === prevRole) {
            removedContents.push(content)
        }

        prevRole = role
    }

    return contents.filter((content) => !removedContents.includes(content))
}
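`checkIfEmptyContentAndSameRole` exists for the multi-agent supervisor case, where consecutive turns from the same role can arrive with empty text and Gemini would otherwise reject the broken alternation. Concretely, given the input below only the empty same-role duplicate is dropped:

import { Content } from '@google/generative-ai'

// Worked example: the second 'model' turn is empty and same-role,
// so the filter removes it and alternation is preserved.
const turns: Content[] = [
    { role: 'user', parts: [{ text: 'hello' }] },
    { role: 'model', parts: [{ text: 'hi there' }] },
    { role: 'model', parts: [{ text: '' }] }
]
const cleaned = checkIfEmptyContentAndSameRole(turns)
// cleaned.length === 2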
function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) {
    return messages.reduce<{
        content: Content[]
        mergeWithPreviousContent: boolean
    }>(
        (acc, message, index) => {
            if (!isBaseMessage(message)) {
                throw new Error('Unsupported message input')
            }
            const author = getMessageAuthor(message)
            if (author === 'system' && index !== 0) {
                throw new Error('System message should be the first one')
            }
            const role = convertAuthorToRole(author)

            const prevContent = acc.content[acc.content.length - 1]
            if (!acc.mergeWithPreviousContent && prevContent && prevContent.role === role) {
                throw new Error('Google Generative AI requires alternate messages between authors')
            }

            const parts = convertMessageContentToParts(message.content, isMultimodalModel)

            if (acc.mergeWithPreviousContent) {
                const prevContent = acc.content[acc.content.length - 1]
                if (!prevContent) {
                    throw new Error('There was a problem parsing your system message. Please try a prompt without one.')
                }
                prevContent.parts.push(...parts)

                return {
                    mergeWithPreviousContent: false,
                    content: acc.content
                }
            }
            const content: Content = {
                role,
                parts
            }
            return {
                mergeWithPreviousContent: author === 'system',
                content: [...acc.content, content]
            }
        },
        { content: [], mergeWithPreviousContent: false }
    ).content
}
function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse): ChatResult {
    // if rejected or error, return empty generations with reason in filters
    if (!response.candidates || response.candidates.length === 0 || !response.candidates[0]) {
        return {
            generations: [],
            llmOutput: {
                filters: response?.promptFeedback
            }
        }
    }

    const [candidate] = response.candidates
    const { content, ...generationInfo } = candidate
    const text = content.parts.map(({ text }) => text).join('')

    if (content.parts.some((part) => part.functionCall)) {
        const toolCalls: ToolCall[] = []
        for (const fcPart of content.parts) {
            const fc = fcPart.functionCall
            if (fc) {
                const { name, args } = fc
                toolCalls.push({ name, args })
            }
        }

        const functionCalls = toolCalls.map((tool) => {
            return { functionCall: { name: tool.name, args: tool.args }, type: 'tool_use' }
        })
        const generation: ChatGeneration = {
            text,
            message: new AIMessage({
                content: functionCalls,
                name: !content ? undefined : content.role,
                additional_kwargs: generationInfo,
                tool_calls: toolCalls
            }),
            generationInfo
        }
        return {
            generations: [generation]
        }
    } else {
        const generation: ChatGeneration = {
            text,
            message: new AIMessage({
                content: text,
                name: !content ? undefined : content.role,
                additional_kwargs: generationInfo
            }),
            generationInfo
        }

        return {
            generations: [generation]
        }
    }
}

function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse): ChatGenerationChunk | null {
    if (!response.candidates || response.candidates.length === 0) {
        return null
    }
    const [candidate] = response.candidates
    const { content, ...generationInfo } = candidate
    const text = content?.parts[0]?.text ?? ''

    return new ChatGenerationChunk({
        text,
        message: new AIMessageChunk({
            content: text,
            name: !content ? undefined : content.role,
            // Each chunk can have unique "generationInfo", and merging strategy is unclear,
            // so leave blank for now.
            additional_kwargs: {}
        }),
        generationInfo
    })
}

function zodToGeminiParameters(zodObj: any) {
    // Gemini doesn't accept either the $schema or additionalProperties
    // attributes, so we need to explicitly remove them.
    const jsonSchema: any = zodToJsonSchema(zodObj)
    // eslint-disable-next-line unused-imports/no-unused-vars
    const { $schema, additionalProperties, ...rest } = jsonSchema
    if (rest.properties) {
        Object.keys(rest.properties).forEach((key) => {
            if (rest.properties[key].enum?.length) {
                rest.properties[key] = { type: 'string', format: 'enum', enum: rest.properties[key].enum }
            }
        })
    }
    return rest
}

function convertToGeminiTools(structuredTools: (StructuredToolInterface | Record<string, unknown>)[]) {
    return [
        {
            functionDeclarations: structuredTools.map((structuredTool) => {
                if (isStructuredTool(structuredTool)) {
                    const jsonSchema = zodToGeminiParameters(structuredTool.schema)
                    return {
                        name: structuredTool.name,
                        description: structuredTool.description,
                        parameters: jsonSchema
                    }
                }
                return structuredTool
            })
        }
    ]
}
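`zodToGeminiParameters` strips the `$schema` and `additionalProperties` keys that Gemini rejects and rewrites enum properties into the `format: 'enum'` shape it expects. For a concrete zod schema the transformation looks like this; a worked illustration, assuming `zod` is available in the package:

import { z } from 'zod'

// Input schema for a hypothetical weather tool.
const weatherSchema = z.object({
    city: z.string(),
    unit: z.enum(['celsius', 'fahrenheit'])
})

const params = zodToGeminiParameters(weatherSchema)
// params carries no $schema / additionalProperties, and params.properties.unit
// becomes { type: 'string', format: 'enum', enum: ['celsius', 'fahrenheit'] }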
@@ -1,7 +1,8 @@
import { ChatGooglePaLM, GooglePaLMChatInput } from '@langchain/community/chat_models/googlepalm'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

class ChatGooglePaLM_ChatModels implements INode {
    label: string
@@ -18,7 +19,7 @@ class ChatGooglePaLM_ChatModels implements INode {
    constructor() {
        this.label = 'ChatGooglePaLM'
        this.name = 'chatGooglePaLM'
        this.version = 2.0
        this.version = 3.0
        this.type = 'ChatGooglePaLM'
        this.icon = 'GooglePaLM.svg'
        this.category = 'Chat Models'
@@ -40,15 +41,9 @@ class ChatGooglePaLM_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'models/chat-bison-001',
                        name: 'models/chat-bison-001'
                    }
                ],
                default: 'models/chat-bison-001',
                optional: true
                type: 'asyncOptions',
                loadMethod: 'listModels',
                default: 'models/chat-bison-001'
            },
            {
                label: 'Temperature',
@@ -98,6 +93,13 @@ class ChatGooglePaLM_ChatModels implements INode {
        ]
    }

    //@ts-ignore
    loadMethods = {
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'chatGooglePaLM')
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const modelName = nodeData.inputs?.modelName as string
        const temperature = nodeData.inputs?.temperature as string
@ -1,8 +1,8 @@
|
|||
import { GoogleAuthOptions } from 'google-auth-library'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ChatGoogleVertexAI, GoogleVertexAIChatInput } from '@langchain/community/chat_models/googlevertexai'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ChatVertexAI, ChatVertexAIInput } from '@langchain/google-vertexai'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

class GoogleVertexAI_ChatModels implements INode {
    label: string

@@ -19,12 +19,12 @@ class GoogleVertexAI_ChatModels implements INode {
    constructor() {
        this.label = 'ChatGoogleVertexAI'
        this.name = 'chatGoogleVertexAI'
-       this.version = 2.0
+       this.version = 4.0
        this.type = 'ChatGoogleVertexAI'
        this.icon = 'GoogleVertex.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around VertexAI large language models that use the Chat endpoint'
-       this.baseClasses = [this.type, ...getBaseClasses(ChatGoogleVertexAI)]
+       this.baseClasses = [this.type, ...getBaseClasses(ChatVertexAI)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',

@@ -44,27 +44,9 @@ class GoogleVertexAI_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
-               type: 'options',
-               options: [
-                   {
-                       label: 'chat-bison',
-                       name: 'chat-bison'
-                   },
-                   {
-                       label: 'codechat-bison',
-                       name: 'codechat-bison'
-                   },
-                   {
-                       label: 'chat-bison-32k',
-                       name: 'chat-bison-32k'
-                   },
-                   {
-                       label: 'codechat-bison-32k',
-                       name: 'codechat-bison-32k'
-                   }
-               ],
-               default: 'chat-bison',
-               optional: true
+               type: 'asyncOptions',
+               loadMethod: 'listModels',
+               default: 'chat-bison'
            },
            {
                label: 'Temperature',

@@ -89,17 +71,33 @@ class GoogleVertexAI_ChatModels implements INode {
                step: 0.1,
                optional: true,
                additionalParams: true
            },
+           {
+               label: 'Top Next Highest Probability Tokens',
+               name: 'topK',
+               type: 'number',
+               description: `Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive`,
+               step: 1,
+               optional: true,
+               additionalParams: true
+           }
        ]
    }

+   //@ts-ignore
+   loadMethods = {
+       async listModels(): Promise<INodeOptionsValue[]> {
+           return await getModels(MODEL_TYPE.CHAT, 'chatGoogleVertexAI')
+       }
+   }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const googleApplicationCredentialFilePath = getCredentialParam('googleApplicationCredentialFilePath', credentialData, nodeData)
        const googleApplicationCredential = getCredentialParam('googleApplicationCredential', credentialData, nodeData)
        const projectID = getCredentialParam('projectID', credentialData, nodeData)

-       const authOptions: GoogleAuthOptions = {}
+       const authOptions: ICommonObject = {}
        if (Object.keys(credentialData).length !== 0) {
            if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
                throw new Error('Please specify your Google Application Credential')

@@ -121,8 +119,9 @@ class GoogleVertexAI_ChatModels implements INode {
        const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
        const topP = nodeData.inputs?.topP as string
        const cache = nodeData.inputs?.cache as BaseCache
+       const topK = nodeData.inputs?.topK as string

-       const obj: GoogleVertexAIChatInput<GoogleAuthOptions> = {
+       const obj: ChatVertexAIInput = {
            temperature: parseFloat(temperature),
            model: modelName
        }

@@ -131,8 +130,9 @@ class GoogleVertexAI_ChatModels implements INode {
        if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (cache) obj.cache = cache
+       if (topK) obj.topK = parseFloat(topK)

-       const model = new ChatGoogleVertexAI(obj)
+       const model = new ChatVertexAI(obj)
        return model
    }
}

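For orientation, a minimal sketch of the construction path this rework moves to, assuming @langchain/google-vertexai is installed; the model id, sampling values, and project id below are illustrative rather than taken from the diff, and the authOptions shape is an assumption based on the credential handling above.

import { ChatVertexAI } from '@langchain/google-vertexai'

async function main() {
    const model = new ChatVertexAI({
        model: 'chat-bison', // any Vertex AI chat model id (illustrative)
        temperature: 0.9,
        maxOutputTokens: 256,
        topP: 0.95,
        topK: 40,
        // authOptions mirrors the object init() assembles from the credential;
        // the projectId value here is hypothetical
        authOptions: { projectId: 'my-gcp-project' }
    })
    const response = await model.invoke('Hello!')
    console.log(response.content)
}
main()
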
@@ -18,7 +18,7 @@ class ChatHuggingFace_ChatModels implements INode {
    constructor() {
        this.label = 'ChatHuggingFace'
        this.name = 'chatHuggingFace'
-       this.version = 2.0
+       this.version = 3.0
        this.type = 'ChatHuggingFace'
        this.icon = 'HuggingFace.svg'
        this.category = 'Chat Models'

@@ -42,8 +42,7 @@ class ChatHuggingFace_ChatModels implements INode {
                name: 'model',
                type: 'string',
-               description: 'If using own inference endpoint, leave this blank',
-               placeholder: 'gpt2',
-               optional: true
+               placeholder: 'gpt2'
            },
            {
                label: 'Endpoint',

@@ -97,6 +96,16 @@ class ChatHuggingFace_ChatModels implements INode {
                description: 'Frequency Penalty parameter may not apply to certain models. Please check available model parameters',
                optional: true,
                additionalParams: true
            },
+           {
+               label: 'Stop Sequence',
+               name: 'stop',
+               type: 'string',
+               rows: 4,
+               placeholder: 'AI assistant:',
+               description: 'Sets the stop sequences to use. Use comma to separate different sequences.',
+               optional: true,
+               additionalParams: true
+           }
        ]
    }

@@ -110,6 +119,7 @@ class ChatHuggingFace_ChatModels implements INode {
        const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
        const endpoint = nodeData.inputs?.endpoint as string
        const cache = nodeData.inputs?.cache as BaseCache
+       const stop = nodeData.inputs?.stop as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)

@@ -124,7 +134,11 @@ class ChatHuggingFace_ChatModels implements INode {
        if (topP) obj.topP = parseFloat(topP)
        if (hfTopK) obj.topK = parseFloat(hfTopK)
        if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
-       if (endpoint) obj.endpoint = endpoint
+       if (endpoint) obj.endpointUrl = endpoint
+       if (stop) {
+           const stopSequences = stop.split(',')
+           obj.stopSequences = stopSequences
+       }

        const huggingFace = new HuggingFaceInference(obj)
        if (cache) huggingFace.cache = cache

@@ -1,32 +1,19 @@
    import { LLM, BaseLLMParams } from '@langchain/core/language_models/llms'
    import { getEnvironmentVariable } from '../../../src/utils'
+   import { GenerationChunk } from '@langchain/core/outputs'
+   import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'

    export interface HFInput {
        /** Model to use */
        model: string

        /** Sampling temperature to use */
        temperature?: number

        /**
         * Maximum number of tokens to generate in the completion.
         */
        maxTokens?: number

        /** Total probability mass of tokens to consider at each step */
+       stopSequences?: string[]
        topP?: number

        /** Integer to define the top tokens considered within the sample operation to create new text. */
        topK?: number

        /** Penalizes repeated tokens according to frequency */
        frequencyPenalty?: number

        /** API key to use. */
        apiKey?: string

        /** Private endpoint to use. */
-       endpoint?: string
+       endpointUrl?: string
+       includeCredentials?: string | boolean
    }

    export class HuggingFaceInference extends LLM implements HFInput {

@@ -40,6 +27,8 @@ export class HuggingFaceInference extends LLM implements HFInput {
    temperature: number | undefined = undefined

+   stopSequences: string[] | undefined = undefined

    maxTokens: number | undefined = undefined

    topP: number | undefined = undefined

@@ -50,7 +39,9 @@ export class HuggingFaceInference extends LLM implements HFInput {
    apiKey: string | undefined = undefined

-   endpoint: string | undefined = undefined
+   endpointUrl: string | undefined = undefined

+   includeCredentials: string | boolean | undefined = undefined

    constructor(fields?: Partial<HFInput> & BaseLLMParams) {
        super(fields ?? {})

@@ -58,11 +49,13 @@ export class HuggingFaceInference extends LLM implements HFInput {
        this.model = fields?.model ?? this.model
        this.temperature = fields?.temperature ?? this.temperature
        this.maxTokens = fields?.maxTokens ?? this.maxTokens
+       this.stopSequences = fields?.stopSequences ?? this.stopSequences
        this.topP = fields?.topP ?? this.topP
        this.topK = fields?.topK ?? this.topK
        this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty
-       this.endpoint = fields?.endpoint ?? ''
        this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
+       this.endpointUrl = fields?.endpointUrl
+       this.includeCredentials = fields?.includeCredentials
        if (!this.apiKey) {
            throw new Error(
                'Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.'

@@ -74,31 +67,65 @@ export class HuggingFaceInference extends LLM implements HFInput {
        return 'hf'
    }

-   /** @ignore */
-   async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
-       const { HfInference } = await HuggingFaceInference.imports()
-       const hf = new HfInference(this.apiKey)
-       const obj: any = {
-           parameters: {
-               // make it behave similar to openai, returning only the generated text
-               return_full_text: false,
-               temperature: this.temperature,
-               max_new_tokens: this.maxTokens,
-               top_p: this.topP,
-               top_k: this.topK,
-               repetition_penalty: this.frequencyPenalty
-           },
-           inputs: prompt
-       }
-       if (this.endpoint) {
-           hf.endpoint(this.endpoint)
-       } else {
-           obj.model = this.model
-       }
-       const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), obj)
-       return res.generated_text
-   }
+   invocationParams(options?: this['ParsedCallOptions']) {
+       return {
+           model: this.model,
+           parameters: {
+               // make it behave similar to openai, returning only the generated text
+               return_full_text: false,
+               temperature: this.temperature,
+               max_new_tokens: this.maxTokens,
+               stop: options?.stop ?? this.stopSequences,
+               top_p: this.topP,
+               top_k: this.topK,
+               repetition_penalty: this.frequencyPenalty
+           }
+       }
+   }
+
+   async *_streamResponseChunks(
+       prompt: string,
+       options: this['ParsedCallOptions'],
+       runManager?: CallbackManagerForLLMRun
+   ): AsyncGenerator<GenerationChunk> {
+       const hfi = await this._prepareHFInference()
+       const stream = await this.caller.call(async () =>
+           hfi.textGenerationStream({
+               ...this.invocationParams(options),
+               inputs: prompt
+           })
+       )
+       for await (const chunk of stream) {
+           const token = chunk.token.text
+           yield new GenerationChunk({ text: token, generationInfo: chunk })
+           await runManager?.handleLLMNewToken(token ?? '')
+
+           // stream is done
+           if (chunk.generated_text)
+               yield new GenerationChunk({
+                   text: '',
+                   generationInfo: { finished: true }
+               })
+       }
+   }
+
+   /** @ignore */
+   async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
+       const hfi = await this._prepareHFInference()
+       const args = { ...this.invocationParams(options), inputs: prompt }
+       const res = await this.caller.callWithOptions({ signal: options.signal }, hfi.textGeneration.bind(hfi), args)
+       return res.generated_text
+   }
+
+   /** @ignore */
+   private async _prepareHFInference() {
+       const { HfInference } = await HuggingFaceInference.imports()
+       const hfi = new HfInference(this.apiKey, {
+           includeCredentials: this.includeCredentials
+       })
+       return this.endpointUrl ? hfi.endpoint(this.endpointUrl) : hfi
+   }

    /** @ignore */
    static async imports(): Promise<{
        HfInference: typeof import('@huggingface/inference').HfInference

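A short usage sketch of the reworked class, assuming it is exported from this module; the model id, stop sequence, and prompt are illustrative. Token-by-token streaming now flows through invocationParams and _streamResponseChunks via the base class's stream().

import { HuggingFaceInference } from './core'

async function main() {
    const llm = new HuggingFaceInference({
        model: 'gpt2', // illustrative model id
        apiKey: process.env.HUGGINGFACEHUB_API_KEY,
        stopSequences: ['\n\n']
    })
    // stream() is provided by the LangChain LLM base class and delegates
    // to _streamResponseChunks defined above
    for await (const chunk of await llm.stream('The sky is')) {
        process.stdout.write(chunk)
    }
}
main()
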
@@ -1,7 +1,8 @@
    import { BaseCache } from '@langchain/core/caches'
    import { ChatMistralAI, ChatMistralAIInput } from '@langchain/mistralai'
-   import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+   import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
    import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+   import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

    class ChatMistral_ChatModels implements INode {
        label: string

@@ -18,7 +19,7 @@ class ChatMistral_ChatModels implements INode {
    constructor() {
        this.label = 'ChatMistralAI'
        this.name = 'chatMistralAI'
-       this.version = 2.0
+       this.version = 3.0
        this.type = 'ChatMistralAI'
        this.icon = 'MistralAI.svg'
        this.category = 'Chat Models'

@@ -40,9 +41,8 @@ class ChatMistral_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
-               type: 'string',
-               description:
-                   'Refer to <a target="_blank" href="https://docs.mistral.ai/guides/model-selection/">Model Selection</a> for more available models',
+               type: 'asyncOptions',
+               loadMethod: 'listModels',
                default: 'mistral-tiny'
            },
            {

@@ -101,6 +101,13 @@ class ChatMistral_ChatModels implements INode {
        ]
    }

+   //@ts-ignore
+   loadMethods = {
+       async listModels(): Promise<INodeOptionsValue[]> {
+           return await getModels(MODEL_TYPE.CHAT, 'chatMistralAI')
+       }
+   }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('mistralAIAPIKey', credentialData, nodeData)

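The modelName input switches from a free-form string to the asyncOptions pattern: the UI invokes the node's loadMethods.listModels and renders the resolved options. A minimal sketch of that contract, with illustrative entries standing in for what getModels(MODEL_TYPE.CHAT, 'chatMistralAI') actually returns from the modelLoader:

import { INodeOptionsValue } from '../../../src/Interface'

// Illustrative stand-in; the shipped implementation reads the list
// from the modelLoader source rather than hardcoding it.
async function listModels(): Promise<INodeOptionsValue[]> {
    return [
        { label: 'mistral-tiny', name: 'mistral-tiny' },
        { label: 'mistral-small', name: 'mistral-small' }
    ]
}
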
@@ -0,0 +1,100 @@
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ALL_AVAILABLE_MISTRAL_MODELS, MistralAI } from 'llamaindex'

class ChatMistral_LlamaIndex_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    tags: string[]
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatMistral'
        this.name = 'chatMistral_LlamaIndex'
        this.version = 1.0
        this.type = 'ChatMistral'
        this.icon = 'MistralAI.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around ChatMistral LLM specific for LlamaIndex'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(MistralAI)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['mistralAIApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'asyncOptions',
                loadMethod: 'listModels',
                default: 'mistral-tiny'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokensToSample',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top P',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'chatMistral_LlamaIndex')
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_MISTRAL_MODELS
        const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
        const topP = nodeData.inputs?.topP as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('mistralAIAPIKey', credentialData, nodeData)

        const obj: Partial<MistralAI> = {
            temperature: parseFloat(temperature),
            model: modelName,
            apiKey: apiKey
        }

        if (maxTokensToSample) obj.maxTokens = parseInt(maxTokensToSample, 10)
        if (topP) obj.topP = parseFloat(topP)

        const model = new MistralAI(obj)
        return model
    }
}

module.exports = { nodeClass: ChatMistral_LlamaIndex_ChatModels }

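A usage sketch for the model this node returns, assuming llamaindex's MistralAI. The exact chat() signature varies across llamaindex releases, so the object form below is an assumption, and the prompt is illustrative.

import { MistralAI } from 'llamaindex'

async function main() {
    const llm = new MistralAI({
        model: 'mistral-tiny',
        temperature: 0.9,
        apiKey: process.env.MISTRAL_API_KEY
    })
    // Object-form chat call; older llamaindex releases take a plain
    // message array instead.
    const res = await llm.chat({ messages: [{ role: 'user', content: 'Say hello' }] })
    console.log(res.message.content)
}
main()
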
@@ -0,0 +1,221 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { OllamaParams, Ollama } from 'llamaindex'

class ChatOllama_LlamaIndex_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    tags: string[]
    baseClasses: string[]
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatOllama'
        this.name = 'chatOllama_LlamaIndex'
        this.version = 1.0
        this.type = 'ChatOllama'
        this.icon = 'Ollama.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around ChatOllama LLM specific for LlamaIndex'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(Ollama)]
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Base URL',
                name: 'baseUrl',
                type: 'string',
                default: 'http://localhost:11434'
            },
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'string',
                placeholder: 'llama3'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                description:
                    'The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Top P',
                name: 'topP',
                type: 'number',
                description:
                    'Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                type: 'number',
                description:
                    'Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Mirostat',
                name: 'mirostat',
                type: 'number',
                description:
                    'Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Mirostat ETA',
                name: 'mirostatEta',
                type: 'number',
                description:
                    'Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Mirostat TAU',
                name: 'mirostatTau',
                type: 'number',
                description:
                    'Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Context Window Size',
                name: 'numCtx',
                type: 'number',
                description:
                    'Sets the size of the context window used to generate the next token. (Default: 2048) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Number of GPU',
                name: 'numGpu',
                type: 'number',
                description:
                    'The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Number of Thread',
                name: 'numThread',
                type: 'number',
                description:
                    'Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Repeat Last N',
                name: 'repeatLastN',
                type: 'number',
                description:
                    'Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Repeat Penalty',
                name: 'repeatPenalty',
                type: 'number',
                description:
                    'Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Stop Sequence',
                name: 'stop',
                type: 'string',
                rows: 4,
                placeholder: 'AI assistant:',
                description:
                    'Sets the stop sequences to use. Use comma to separate different sequences. Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Tail Free Sampling',
                name: 'tfsZ',
                type: 'number',
                description:
                    'Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (Default: 1). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const baseUrl = nodeData.inputs?.baseUrl as string
        const modelName = nodeData.inputs?.modelName as string
        const topP = nodeData.inputs?.topP as string
        const topK = nodeData.inputs?.topK as string
        const mirostat = nodeData.inputs?.mirostat as string
        const mirostatEta = nodeData.inputs?.mirostatEta as string
        const mirostatTau = nodeData.inputs?.mirostatTau as string
        const numCtx = nodeData.inputs?.numCtx as string
        const numGpu = nodeData.inputs?.numGpu as string
        const numThread = nodeData.inputs?.numThread as string
        const repeatLastN = nodeData.inputs?.repeatLastN as string
        const repeatPenalty = nodeData.inputs?.repeatPenalty as string
        const stop = nodeData.inputs?.stop as string
        const tfsZ = nodeData.inputs?.tfsZ as string

        const obj: OllamaParams = {
            model: modelName,
            options: {},
            config: {
                host: baseUrl
            }
        }

        if (temperature) obj.options.temperature = parseFloat(temperature)
        if (topP) obj.options.top_p = parseFloat(topP)
        if (topK) obj.options.top_k = parseFloat(topK)
        if (mirostat) obj.options.mirostat = parseFloat(mirostat)
        if (mirostatEta) obj.options.mirostat_eta = parseFloat(mirostatEta)
        if (mirostatTau) obj.options.mirostat_tau = parseFloat(mirostatTau)
        if (numCtx) obj.options.num_ctx = parseFloat(numCtx)
        if (numGpu) obj.options.main_gpu = parseFloat(numGpu)
        if (numThread) obj.options.num_thread = parseFloat(numThread)
        if (repeatLastN) obj.options.repeat_last_n = parseFloat(repeatLastN)
        if (repeatPenalty) obj.options.repeat_penalty = parseFloat(repeatPenalty)
        if (tfsZ) obj.options.tfs_z = parseFloat(tfsZ)
        if (stop) {
            const stopSequences = stop.split(',')
            obj.options.stop = stopSequences
        }

        const model = new Ollama(obj)
        return model
    }
}

module.exports = { nodeClass: ChatOllama_LlamaIndex_ChatModels }

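The OllamaParams shape that init above assembles, shown standalone: the model id plus a snake_case options bag and a config.host pointing at the Ollama server. The model name and option values are illustrative.

import { Ollama, OllamaParams } from 'llamaindex'

const params: OllamaParams = {
    model: 'llama3', // illustrative
    config: { host: 'http://localhost:11434' },
    options: { temperature: 0.9, top_p: 0.95, num_ctx: 4096 }
}
const model = new Ollama(params)
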
@@ -0,0 +1,808 @@
import { HumanMessage, AIMessage, BaseMessage, AIMessageChunk, ChatMessage } from '@langchain/core/messages'
import { ChatResult } from '@langchain/core/outputs'
import { SimpleChatModel, BaseChatModel, BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { SystemMessagePromptTemplate } from '@langchain/core/prompts'
import { BaseCache } from '@langchain/core/caches'
import { type StructuredToolInterface } from '@langchain/core/tools'
import type { BaseFunctionCallOptions, BaseLanguageModelInput } from '@langchain/core/language_models/base'
import { convertToOpenAIFunction } from '@langchain/core/utils/function_calling'
import { RunnableInterface } from '@langchain/core/runnables'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import type { BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
import { ChatGenerationChunk } from '@langchain/core/outputs'
import type { StringWithAutocomplete } from '@langchain/core/utils/types'
import { createOllamaChatStream, createOllamaGenerateStream, type OllamaInput, type OllamaMessage } from './utils'

const DEFAULT_TOOL_SYSTEM_TEMPLATE = `You have access to the following tools:
{tools}
You must always select one of the above tools and respond with only a JSON object matching the following schema:
{{
    "tool": <name of the selected tool>,
    "tool_input": <parameters for the selected tool, matching the tool's JSON schema>
}}`

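A hypothetical model reply this template is designed to elicit; _generate further down parses it with JSON.parse and dispatches on the "tool" key. The tool name and arguments below are illustrative.

// Hypothetical reply matching the schema in DEFAULT_TOOL_SYSTEM_TEMPLATE:
const exampleModelReply = `{
    "tool": "get_weather",
    "tool_input": { "city": "Paris" }
}`
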
class ChatOllamaFunction_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    badge?: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatOllama Function'
        this.name = 'chatOllamaFunction'
        this.version = 1.0
        this.type = 'ChatOllamaFunction'
        this.icon = 'Ollama.svg'
        this.category = 'Chat Models'
        this.description = 'Run open-source function-calling compatible LLM on Ollama'
        this.baseClasses = [this.type, ...getBaseClasses(OllamaFunctions)]
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Base URL',
                name: 'baseUrl',
                type: 'string',
                default: 'http://localhost:11434'
            },
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'string',
                description: 'Only compatible with function calling models like mistral',
                placeholder: 'mistral'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                description:
                    'The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Tool System Prompt',
                name: 'toolSystemPromptTemplate',
                type: 'string',
                rows: 4,
                description: `Under the hood, Ollama's JSON mode is used to constrain output to JSON. The output JSON will contain two keys: tool and tool_input. We then parse it to execute the tool. Because different models have different strengths, it may be helpful to pass in your own system prompt.`,
                warning: `Prompt must always contain {tools} and instructions to respond with a JSON object with tool and tool_input fields`,
                default: DEFAULT_TOOL_SYSTEM_TEMPLATE,
                placeholder: DEFAULT_TOOL_SYSTEM_TEMPLATE,
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top P',
                name: 'topP',
                type: 'number',
                description:
                    'Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                type: 'number',
                description:
                    'Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Mirostat',
                name: 'mirostat',
                type: 'number',
                description:
                    'Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Mirostat ETA',
                name: 'mirostatEta',
                type: 'number',
                description:
                    'Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Mirostat TAU',
                name: 'mirostatTau',
                type: 'number',
                description:
                    'Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Context Window Size',
                name: 'numCtx',
                type: 'number',
                description:
                    'Sets the size of the context window used to generate the next token. (Default: 2048) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Number of GQA groups',
                name: 'numGqa',
                type: 'number',
                description:
                    'The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b. Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Number of GPU',
                name: 'numGpu',
                type: 'number',
                description:
                    'The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Number of Thread',
                name: 'numThread',
                type: 'number',
                description:
                    'Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Repeat Last N',
                name: 'repeatLastN',
                type: 'number',
                description:
                    'Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Repeat Penalty',
                name: 'repeatPenalty',
                type: 'number',
                description:
                    'Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Stop Sequence',
                name: 'stop',
                type: 'string',
                rows: 4,
                placeholder: 'AI assistant:',
                description:
                    'Sets the stop sequences to use. Use comma to separate different sequences. Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Tail Free Sampling',
                name: 'tfsZ',
                type: 'number',
                description:
                    'Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (Default: 1). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs</a> for more details',
                step: 0.1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const baseUrl = nodeData.inputs?.baseUrl as string
        const modelName = nodeData.inputs?.modelName as string
        const topP = nodeData.inputs?.topP as string
        const topK = nodeData.inputs?.topK as string
        const mirostat = nodeData.inputs?.mirostat as string
        const mirostatEta = nodeData.inputs?.mirostatEta as string
        const mirostatTau = nodeData.inputs?.mirostatTau as string
        const numCtx = nodeData.inputs?.numCtx as string
        const numGqa = nodeData.inputs?.numGqa as string
        const numGpu = nodeData.inputs?.numGpu as string
        const numThread = nodeData.inputs?.numThread as string
        const repeatLastN = nodeData.inputs?.repeatLastN as string
        const repeatPenalty = nodeData.inputs?.repeatPenalty as string
        const stop = nodeData.inputs?.stop as string
        const tfsZ = nodeData.inputs?.tfsZ as string
        const toolSystemPromptTemplate = nodeData.inputs?.toolSystemPromptTemplate as string

        const cache = nodeData.inputs?.cache as BaseCache

        const obj: OllamaFunctionsInput = {
            baseUrl,
            temperature: parseFloat(temperature),
            model: modelName,
            toolSystemPromptTemplate: toolSystemPromptTemplate ? toolSystemPromptTemplate : DEFAULT_TOOL_SYSTEM_TEMPLATE
        }

        if (topP) obj.topP = parseFloat(topP)
        if (topK) obj.topK = parseFloat(topK)
        if (mirostat) obj.mirostat = parseFloat(mirostat)
        if (mirostatEta) obj.mirostatEta = parseFloat(mirostatEta)
        if (mirostatTau) obj.mirostatTau = parseFloat(mirostatTau)
        if (numCtx) obj.numCtx = parseFloat(numCtx)
        if (numGqa) obj.numGqa = parseFloat(numGqa)
        if (numGpu) obj.numGpu = parseFloat(numGpu)
        if (numThread) obj.numThread = parseFloat(numThread)
        if (repeatLastN) obj.repeatLastN = parseFloat(repeatLastN)
        if (repeatPenalty) obj.repeatPenalty = parseFloat(repeatPenalty)
        if (tfsZ) obj.tfsZ = parseFloat(tfsZ)
        if (stop) {
            const stopSequences = stop.split(',')
            obj.stop = stopSequences
        }
        if (cache) obj.cache = cache

        const model = new OllamaFunctions(obj)
        return model
    }
}

interface ChatOllamaFunctionsCallOptions extends BaseFunctionCallOptions {}

type OllamaFunctionsInput = Partial<ChatOllamaInput> &
    BaseChatModelParams & {
        llm?: OllamaChat
        toolSystemPromptTemplate?: string
    }

class OllamaFunctions extends BaseChatModel<ChatOllamaFunctionsCallOptions> {
    llm: OllamaChat

    fields?: OllamaFunctionsInput

    toolSystemPromptTemplate: string = DEFAULT_TOOL_SYSTEM_TEMPLATE

    protected defaultResponseFunction = {
        name: '__conversational_response',
        description: 'Respond conversationally if no other tools should be called for a given query.',
        parameters: {
            type: 'object',
            properties: {
                response: {
                    type: 'string',
                    description: 'Conversational response to the user.'
                }
            },
            required: ['response']
        }
    }

    static lc_name(): string {
        return 'OllamaFunctions'
    }

    constructor(fields?: OllamaFunctionsInput) {
        super(fields ?? {})
        this.fields = fields
        this.llm = fields?.llm ?? new OllamaChat({ ...fields, format: 'json' })
        this.toolSystemPromptTemplate = fields?.toolSystemPromptTemplate ?? this.toolSystemPromptTemplate
    }

    invocationParams() {
        return this.llm.invocationParams()
    }

    /** @ignore */
    _identifyingParams() {
        return this.llm._identifyingParams()
    }

    async _generate(
        messages: BaseMessage[],
        options: this['ParsedCallOptions'],
        runManager?: CallbackManagerForLLMRun | undefined
    ): Promise<ChatResult> {
        let functions = options.functions ?? []
        if (options.function_call !== undefined) {
            functions = functions.filter((fn) => fn.name === options.function_call?.name)
            if (!functions.length) {
                throw new Error(`If "function_call" is specified, you must also pass a matching function in "functions".`)
            }
        } else if (functions.length === 0) {
            functions.push(this.defaultResponseFunction)
        }
        const systemPromptTemplate = SystemMessagePromptTemplate.fromTemplate(this.toolSystemPromptTemplate)
        const systemMessage = await systemPromptTemplate.format({
            tools: JSON.stringify(functions, null, 2)
        })

        let generatedMessages = [systemMessage, ...messages]
        let isToolResponse = false
        if (
            messages.length > 3 &&
            messages[messages.length - 1]._getType() === 'tool' &&
            functions.length &&
            messages[messages.length - 1].additional_kwargs?.name === functions[0].name
        ) {
            const lastToolQuestion = messages[messages.length - 3].content
            const lastToolResp = messages.pop()?.content
            // Pop the message again to get rid of tool call message
            messages.pop()?.content
            const humanMessage = new HumanMessage({
                content: `Given user question: ${lastToolQuestion} and answer: ${lastToolResp}\n\nWrite a natural language response`
            })
            generatedMessages = [...messages, humanMessage]
            isToolResponse = true
            this.llm = new OllamaChat({ ...this.fields })
        }
        const chatResult = await this.llm._generate(generatedMessages, options, runManager)
        const chatGenerationContent = chatResult.generations[0].message.content

        if (typeof chatGenerationContent !== 'string') {
            throw new Error('OllamaFunctions does not support non-string output.')
        }

        if (isToolResponse) {
            return {
                generations: [
                    {
                        message: new AIMessage({
                            content: chatGenerationContent
                        }),
                        text: chatGenerationContent
                    }
                ]
            }
        }

        let parsedChatResult
        try {
            parsedChatResult = JSON.parse(chatGenerationContent)
        } catch (e) {
            throw new Error(`"${this.llm.model}" did not respond with valid JSON. Please try again.`)
        }

        const calledToolName = parsedChatResult.tool
        const calledToolArguments = parsedChatResult.tool_input
        const calledTool = functions.find((fn) => fn.name === calledToolName)
        if (calledTool === undefined) {
            throw new Error(`Failed to parse a function call from ${this.llm.model} output: ${chatGenerationContent}`)
        }

        if (calledTool.name === this.defaultResponseFunction.name) {
            return {
                generations: [
                    {
                        message: new AIMessage({
                            content: calledToolArguments.response
                        }),
                        text: calledToolArguments.response
                    }
                ]
            }
        }

        const responseMessageWithFunctions = new AIMessage({
            content: '',
            tool_calls: [
                {
                    name: calledToolName,
                    args: calledToolArguments || {}
                }
            ],
            invalid_tool_calls: [],
            additional_kwargs: {
                function_call: {
                    name: calledToolName,
                    arguments: calledToolArguments ? JSON.stringify(calledToolArguments) : ''
                },
                tool_calls: [
                    {
                        id: Date.now().toString(),
                        type: 'function',
                        function: {
                            name: calledToolName,
                            arguments: calledToolArguments ? JSON.stringify(calledToolArguments) : ''
                        }
                    }
                ]
            }
        })

        return {
            generations: [{ message: responseMessageWithFunctions, text: '' }]
        }
    }

    override bindTools(
        tools: StructuredToolInterface[],
        kwargs?: Partial<ICommonObject>
    ): RunnableInterface<BaseLanguageModelInput, AIMessageChunk, ICommonObject> {
        return this.bind({
            functions: tools.map((tool) => convertToOpenAIFunction(tool)),
            ...kwargs
        } as Partial<ICommonObject>)
    }

    _llmType(): string {
        return 'ollama_functions'
    }

    /** @ignore */
    _combineLLMOutput() {
        return []
    }
}

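A sketch of attaching a tool through bindTools above, assuming a StructuredTool built with @langchain/core's DynamicStructuredTool and zod; the add tool and the question are illustrative.

import { DynamicStructuredTool } from '@langchain/core/tools'
import { z } from 'zod'

const addTool = new DynamicStructuredTool({
    name: 'add',
    description: 'Add two numbers',
    schema: z.object({ a: z.number(), b: z.number() }),
    func: async ({ a, b }) => String(a + b)
})

// model would be an OllamaFunctions instance created as in init() above;
// invoking it requires a running Ollama server, hence left commented:
// const withTools = model.bindTools([addTool])
// const aiMessage = await withTools.invoke('What is 2 + 3?')
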
export interface ChatOllamaInput extends OllamaInput {}

interface ChatOllamaCallOptions extends BaseLanguageModelCallOptions {}

class OllamaChat extends SimpleChatModel<ChatOllamaCallOptions> implements ChatOllamaInput {
    static lc_name() {
        return 'ChatOllama'
    }

    lc_serializable = true

    model = 'llama2'

    baseUrl = 'http://localhost:11434'

    keepAlive = '5m'

    embeddingOnly?: boolean

    f16KV?: boolean

    frequencyPenalty?: number

    headers?: Record<string, string>

    logitsAll?: boolean

    lowVram?: boolean

    mainGpu?: number

    mirostat?: number

    mirostatEta?: number

    mirostatTau?: number

    numBatch?: number

    numCtx?: number

    numGpu?: number

    numGqa?: number

    numKeep?: number

    numPredict?: number

    numThread?: number

    penalizeNewline?: boolean

    presencePenalty?: number

    repeatLastN?: number

    repeatPenalty?: number

    ropeFrequencyBase?: number

    ropeFrequencyScale?: number

    temperature?: number

    stop?: string[]

    tfsZ?: number

    topK?: number

    topP?: number

    typicalP?: number

    useMLock?: boolean

    useMMap?: boolean

    vocabOnly?: boolean

    format?: StringWithAutocomplete<'json'>

    constructor(fields: OllamaInput & BaseChatModelParams) {
        super(fields)
        this.model = fields.model ?? this.model
        this.baseUrl = fields.baseUrl?.endsWith('/') ? fields.baseUrl.slice(0, -1) : fields.baseUrl ?? this.baseUrl
        this.keepAlive = fields.keepAlive ?? this.keepAlive
        this.embeddingOnly = fields.embeddingOnly
        this.f16KV = fields.f16KV
        this.frequencyPenalty = fields.frequencyPenalty
        this.headers = fields.headers
        this.logitsAll = fields.logitsAll
        this.lowVram = fields.lowVram
        this.mainGpu = fields.mainGpu
        this.mirostat = fields.mirostat
        this.mirostatEta = fields.mirostatEta
        this.mirostatTau = fields.mirostatTau
        this.numBatch = fields.numBatch
        this.numCtx = fields.numCtx
        this.numGpu = fields.numGpu
        this.numGqa = fields.numGqa
        this.numKeep = fields.numKeep
        this.numPredict = fields.numPredict
        this.numThread = fields.numThread
        this.penalizeNewline = fields.penalizeNewline
        this.presencePenalty = fields.presencePenalty
        this.repeatLastN = fields.repeatLastN
        this.repeatPenalty = fields.repeatPenalty
        this.ropeFrequencyBase = fields.ropeFrequencyBase
        this.ropeFrequencyScale = fields.ropeFrequencyScale
        this.temperature = fields.temperature
        this.stop = fields.stop
        this.tfsZ = fields.tfsZ
        this.topK = fields.topK
        this.topP = fields.topP
        this.typicalP = fields.typicalP
        this.useMLock = fields.useMLock
        this.useMMap = fields.useMMap
        this.vocabOnly = fields.vocabOnly
        this.format = fields.format
    }

    _llmType() {
        return 'ollama'
    }

    /**
     * A method that returns the parameters for an Ollama API call. It
     * includes model and options parameters.
     * @param options Optional parsed call options.
     * @returns An object containing the parameters for an Ollama API call.
     */
    invocationParams(options?: this['ParsedCallOptions']) {
        return {
            model: this.model,
            format: this.format,
            keep_alive: this.keepAlive,
            options: {
                embedding_only: this.embeddingOnly,
                f16_kv: this.f16KV,
                frequency_penalty: this.frequencyPenalty,
                logits_all: this.logitsAll,
                low_vram: this.lowVram,
                main_gpu: this.mainGpu,
                mirostat: this.mirostat,
                mirostat_eta: this.mirostatEta,
                mirostat_tau: this.mirostatTau,
                num_batch: this.numBatch,
                num_ctx: this.numCtx,
                num_gpu: this.numGpu,
                num_gqa: this.numGqa,
                num_keep: this.numKeep,
                num_predict: this.numPredict,
                num_thread: this.numThread,
                penalize_newline: this.penalizeNewline,
                presence_penalty: this.presencePenalty,
                repeat_last_n: this.repeatLastN,
                repeat_penalty: this.repeatPenalty,
                rope_frequency_base: this.ropeFrequencyBase,
                rope_frequency_scale: this.ropeFrequencyScale,
                temperature: this.temperature,
                stop: options?.stop ?? this.stop,
                tfs_z: this.tfsZ,
                top_k: this.topK,
                top_p: this.topP,
                typical_p: this.typicalP,
                use_mlock: this.useMLock,
                use_mmap: this.useMMap,
                vocab_only: this.vocabOnly
            }
        }
    }

    _combineLLMOutput() {
        return {}
    }

    /** @deprecated */
    async *_streamResponseChunksLegacy(
        input: BaseMessage[],
        options: this['ParsedCallOptions'],
        runManager?: CallbackManagerForLLMRun
    ): AsyncGenerator<ChatGenerationChunk> {
        const stream = createOllamaGenerateStream(
            this.baseUrl,
            {
                ...this.invocationParams(options),
                prompt: this._formatMessagesAsPrompt(input)
            },
            {
                ...options,
                headers: this.headers
            }
        )
        for await (const chunk of stream) {
            if (!chunk.done) {
                yield new ChatGenerationChunk({
                    text: chunk.response,
                    message: new AIMessageChunk({ content: chunk.response })
                })
                await runManager?.handleLLMNewToken(chunk.response ?? '')
            } else {
                yield new ChatGenerationChunk({
                    text: '',
                    message: new AIMessageChunk({ content: '' }),
                    generationInfo: {
                        model: chunk.model,
                        total_duration: chunk.total_duration,
                        load_duration: chunk.load_duration,
                        prompt_eval_count: chunk.prompt_eval_count,
                        prompt_eval_duration: chunk.prompt_eval_duration,
                        eval_count: chunk.eval_count,
                        eval_duration: chunk.eval_duration
                    }
                })
            }
        }
    }

    async *_streamResponseChunks(
        input: BaseMessage[],
        options: this['ParsedCallOptions'],
        runManager?: CallbackManagerForLLMRun
    ): AsyncGenerator<ChatGenerationChunk> {
        try {
            const stream = await this.caller.call(async () =>
                createOllamaChatStream(
                    this.baseUrl,
                    {
                        ...this.invocationParams(options),
                        messages: this._convertMessagesToOllamaMessages(input)
                    },
                    {
                        ...options,
                        headers: this.headers
                    }
                )
            )
            for await (const chunk of stream) {
                if (!chunk.done) {
                    yield new ChatGenerationChunk({
                        text: chunk.message.content,
                        message: new AIMessageChunk({ content: chunk.message.content })
                    })
                    await runManager?.handleLLMNewToken(chunk.message.content ?? '')
                } else {
                    yield new ChatGenerationChunk({
                        text: '',
                        message: new AIMessageChunk({ content: '' }),
                        generationInfo: {
                            model: chunk.model,
                            total_duration: chunk.total_duration,
                            load_duration: chunk.load_duration,
                            prompt_eval_count: chunk.prompt_eval_count,
                            prompt_eval_duration: chunk.prompt_eval_duration,
                            eval_count: chunk.eval_count,
                            eval_duration: chunk.eval_duration
                        }
                    })
                }
            }
        } catch (e: any) {
            if (e.response?.status === 404) {
                console.warn(
                    '[WARNING]: It seems you are using a legacy version of Ollama. Please upgrade to a newer version for better chat support.'
                )
                yield* this._streamResponseChunksLegacy(input, options, runManager)
            } else {
                throw e
            }
        }
    }

    protected _convertMessagesToOllamaMessages(messages: BaseMessage[]): OllamaMessage[] {
        return messages.map((message) => {
            let role
            if (message._getType() === 'human') {
                role = 'user'
            } else if (message._getType() === 'ai' || message._getType() === 'tool') {
                role = 'assistant'
            } else if (message._getType() === 'system') {
                role = 'system'
            } else {
                throw new Error(`Unsupported message type for Ollama: ${message._getType()}`)
            }
            let content = ''
            const images = []
            if (typeof message.content === 'string') {
                content = message.content
            } else {
                for (const contentPart of message.content) {
                    if (contentPart.type === 'text') {
                        content = `${content}\n${contentPart.text}`
                    } else if (contentPart.type === 'image_url' && typeof contentPart.image_url === 'string') {
                        const imageUrlComponents = contentPart.image_url.split(',')
                        // Support both data:image/jpeg;base64,<image> format as well
                        images.push(imageUrlComponents[1] ?? imageUrlComponents[0])
                    } else {
                        throw new Error(
                            `Unsupported message content type. Must either have type "text" or type "image_url" with a string "image_url" field.`
                        )
                    }
                }
            }
            return {
                role,
                content,
                images
            }
        })
    }

    /** @deprecated */
    protected _formatMessagesAsPrompt(messages: BaseMessage[]): string {
        const formattedMessages = messages
            .map((message) => {
                let messageText
                if (message._getType() === 'human') {
                    messageText = `[INST] ${message.content} [/INST]`
                } else if (message._getType() === 'ai') {
                    messageText = message.content
                } else if (message._getType() === 'system') {
                    messageText = `<<SYS>> ${message.content} <</SYS>>`
                } else if (ChatMessage.isInstance(message)) {
                    messageText = `\n\n${message.role[0].toUpperCase()}${message.role.slice(1)}: ${message.content}`
                } else {
                    console.warn(`Unsupported message type passed to Ollama: "${message._getType()}"`)
                    messageText = ''
                }
                return messageText
            })
            .join('\n')
        return formattedMessages
    }

    /** @ignore */
    async _call(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): Promise<string> {
        const chunks = []
        for await (const chunk of this._streamResponseChunks(messages, options, runManager)) {
            chunks.push(chunk.message.content)
        }
        return chunks.join('')
    }
}

module.exports = { nodeClass: ChatOllamaFunction_ChatModels }

@@ -0,0 +1 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M7 27.5c0-1.273.388-2.388.97-3-.582-.612-.97-1.727-.97-3 0-1.293.4-2.422.996-3.028A4.818 4.818 0 0 1 7 15.5c0-2.485 1.79-4.5 4-4.5l.1.001a5.002 5.002 0 0 1 9.8 0L21 11c2.21 0 4 2.015 4 4.5 0 1.139-.376 2.18-.996 2.972.595.606.996 1.735.996 3.028 0 1.273-.389 2.388-.97 3 .581.612.97 1.727.97 3" stroke="#000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/><path d="M9.5 11C9.167 8.5 9 4 11 4c1.5 0 1.667 2.667 2 4m9.5 3c.333-2.5.5-7-1.5-7-1.5 0-1.667 2.667-2 4" stroke="#000" stroke-width="2" stroke-linecap="round"/><circle cx="11" cy="15" r="1" fill="#000"/><circle cx="21" cy="15" r="1" fill="#000"/><path d="M13 17c0-2 2-2.5 3-2.5s3 .5 3 2.5-2 2.5-3 2.5-3-.5-3-2.5Z" stroke="#000" stroke-width="2" stroke-linecap="round"/></svg>
@@ -0,0 +1,185 @@
import { IterableReadableStream } from '@langchain/core/utils/stream'
import type { StringWithAutocomplete } from '@langchain/core/utils/types'
import { BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'

export interface OllamaInput {
    embeddingOnly?: boolean
    f16KV?: boolean
    frequencyPenalty?: number
    headers?: Record<string, string>
    keepAlive?: string
    logitsAll?: boolean
    lowVram?: boolean
    mainGpu?: number
    model?: string
    baseUrl?: string
    mirostat?: number
    mirostatEta?: number
    mirostatTau?: number
    numBatch?: number
    numCtx?: number
    numGpu?: number
    numGqa?: number
    numKeep?: number
    numPredict?: number
    numThread?: number
    penalizeNewline?: boolean
    presencePenalty?: number
    repeatLastN?: number
    repeatPenalty?: number
    ropeFrequencyBase?: number
    ropeFrequencyScale?: number
    temperature?: number
    stop?: string[]
    tfsZ?: number
    topK?: number
    topP?: number
    typicalP?: number
    useMLock?: boolean
    useMMap?: boolean
    vocabOnly?: boolean
    format?: StringWithAutocomplete<'json'>
}

export interface OllamaRequestParams {
    model: string
    format?: StringWithAutocomplete<'json'>
    images?: string[]
    options: {
        embedding_only?: boolean
        f16_kv?: boolean
        frequency_penalty?: number
        logits_all?: boolean
        low_vram?: boolean
        main_gpu?: number
        mirostat?: number
        mirostat_eta?: number
        mirostat_tau?: number
        num_batch?: number
        num_ctx?: number
        num_gpu?: number
        num_gqa?: number
        num_keep?: number
        num_thread?: number
        num_predict?: number
        penalize_newline?: boolean
        presence_penalty?: number
        repeat_last_n?: number
        repeat_penalty?: number
        rope_frequency_base?: number
        rope_frequency_scale?: number
        temperature?: number
        stop?: string[]
        tfs_z?: number
        top_k?: number
        top_p?: number
        typical_p?: number
        use_mlock?: boolean
        use_mmap?: boolean
        vocab_only?: boolean
    }
}

export type OllamaMessage = {
    role: StringWithAutocomplete<'user' | 'assistant' | 'system'>
    content: string
    images?: string[]
}

export interface OllamaGenerateRequestParams extends OllamaRequestParams {
    prompt: string
}

export interface OllamaChatRequestParams extends OllamaRequestParams {
    messages: OllamaMessage[]
}

export type BaseOllamaGenerationChunk = {
    model: string
    created_at: string
    done: boolean
    total_duration?: number
    load_duration?: number
    prompt_eval_count?: number
    prompt_eval_duration?: number
    eval_count?: number
    eval_duration?: number
}

export type OllamaGenerationChunk = BaseOllamaGenerationChunk & {
    response: string
}

export type OllamaChatGenerationChunk = BaseOllamaGenerationChunk & {
    message: OllamaMessage
}

export type OllamaCallOptions = BaseLanguageModelCallOptions & {
    headers?: Record<string, string>
}

async function* createOllamaStream(url: string, params: OllamaRequestParams, options: OllamaCallOptions) {
    let formattedUrl = url
    if (formattedUrl.startsWith('http://localhost:')) {
        // Node 18 has issues with resolving "localhost"
        // See https://github.com/node-fetch/node-fetch/issues/1624
        formattedUrl = formattedUrl.replace('http://localhost:', 'http://127.0.0.1:')
    }
    const response = await fetch(formattedUrl, {
        method: 'POST',
        body: JSON.stringify(params),
        headers: {
            'Content-Type': 'application/json',
            ...options.headers
        },
        signal: options.signal
    })
    if (!response.ok) {
        let error
        const responseText = await response.text()
        try {
            const json = JSON.parse(responseText)
            error = new Error(`Ollama call failed with status code ${response.status}: ${json.error}`)
        } catch (e) {
            error = new Error(`Ollama call failed with status code ${response.status}: ${responseText}`)
        }
        ;(error as any).response = response
        throw error
    }
    if (!response.body) {
        throw new Error('Could not begin Ollama stream. Please check the given URL and try again.')
    }

    const stream = IterableReadableStream.fromReadableStream(response.body)

    const decoder = new TextDecoder()
    let extra = ''
    for await (const chunk of stream) {
        const decoded = extra + decoder.decode(chunk)
        const lines = decoded.split('\n')
        extra = lines.pop() || ''
        for (const line of lines) {
            try {
                yield JSON.parse(line)
            } catch (e) {
                console.warn(`Received a non-JSON parseable chunk: ${line}`)
            }
        }
    }
}

export async function* createOllamaGenerateStream(
    baseUrl: string,
    params: OllamaGenerateRequestParams,
    options: OllamaCallOptions
): AsyncGenerator<OllamaGenerationChunk> {
    yield* createOllamaStream(`${baseUrl}/api/generate`, params, options)
}
|
||||
|
||||
export async function* createOllamaChatStream(
|
||||
baseUrl: string,
|
||||
params: OllamaChatRequestParams,
|
||||
options: OllamaCallOptions
|
||||
): AsyncGenerator<OllamaChatGenerationChunk> {
|
||||
yield* createOllamaStream(`${baseUrl}/api/chat`, params, options)
|
||||
}
|
||||
|
|
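For context on the new file above: it is an NDJSON streaming client for the Ollama HTTP API. A minimal consumer sketch follows; the base URL, model name, and prompt are illustrative assumptions, not part of this change.

async function demoOllamaChat(): Promise<string> {
    // Assumed: a local Ollama server on its default port with a pulled 'llama2' model.
    const stream = createOllamaChatStream(
        'http://localhost:11434',
        {
            model: 'llama2',
            options: { temperature: 0.7 },
            messages: [{ role: 'user', content: 'Why is the sky blue?' }]
        },
        {}
    )
    let answer = ''
    for await (const chunk of stream) {
        // Each parsed NDJSON line carries one incremental assistant-message delta.
        if (!chunk.done) answer += chunk.message.content
    }
    return answer
}
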
@@ -2,9 +2,10 @@ import type { ClientOptions } from 'openai'
import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, AzureOpenAIInput, LegacyOpenAIInput } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
-import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI } from './FlowiseChatOpenAI'
+import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

class ChatOpenAI_ChatModels implements INode {
    label: string

@@ -21,7 +22,7 @@ class ChatOpenAI_ChatModels implements INode {
    constructor() {
        this.label = 'ChatOpenAI'
        this.name = 'chatOpenAI'
-        this.version = 5.0
+        this.version = 6.0
        this.type = 'ChatOpenAI'
        this.icon = 'openai.svg'
        this.category = 'Chat Models'

@@ -43,71 +44,9 @@ class ChatOpenAI_ChatModels implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
-                type: 'options',
-                options: [
-                    {
-                        label: 'gpt-4',
-                        name: 'gpt-4'
-                    },
-                    {
-                        label: 'gpt-4-turbo-preview',
-                        name: 'gpt-4-turbo-preview'
-                    },
-                    {
-                        label: 'gpt-4-0125-preview',
-                        name: 'gpt-4-0125-preview'
-                    },
-                    {
-                        label: 'gpt-4-1106-preview',
-                        name: 'gpt-4-1106-preview'
-                    },
-                    {
-                        label: 'gpt-4-1106-vision-preview',
-                        name: 'gpt-4-1106-vision-preview'
-                    },
-                    {
-                        label: 'gpt-4-vision-preview',
-                        name: 'gpt-4-vision-preview'
-                    },
-                    {
-                        label: 'gpt-4-0613',
-                        name: 'gpt-4-0613'
-                    },
-                    {
-                        label: 'gpt-4-32k',
-                        name: 'gpt-4-32k'
-                    },
-                    {
-                        label: 'gpt-4-32k-0613',
-                        name: 'gpt-4-32k-0613'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo',
-                        name: 'gpt-3.5-turbo'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-0125',
-                        name: 'gpt-3.5-turbo-0125'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-1106',
-                        name: 'gpt-3.5-turbo-1106'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-0613',
-                        name: 'gpt-3.5-turbo-0613'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-16k',
-                        name: 'gpt-3.5-turbo-16k'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-16k-0613',
-                        name: 'gpt-3.5-turbo-16k-0613'
-                    }
-                ],
-                default: 'gpt-3.5-turbo',
-                optional: true
+                type: 'asyncOptions',
+                loadMethod: 'listModels',
+                default: 'gpt-3.5-turbo'
            },
            {
                label: 'Temperature',

@@ -206,6 +145,13 @@ class ChatOpenAI_ChatModels implements INode {
        ]
    }

+    //@ts-ignore
+    loadMethods = {
+        async listModels(): Promise<INodeOptionsValue[]> {
+            return await getModels(MODEL_TYPE.CHAT, 'chatOpenAI')
+        }
+    }
+
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as string

@@ -221,6 +167,9 @@ class ChatOpenAI_ChatModels implements INode {
        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
        const imageResolution = nodeData.inputs?.imageResolution as string

+        if (nodeData.inputs?.credentialId) {
+            nodeData.credential = nodeData.inputs?.credentialId
+        }
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

@@ -1,6 +1,7 @@
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
+import { OpenAI, OpenAISession, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
+import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

class ChatOpenAI_LlamaIndex_LLMs implements INode {
    label: string

@@ -18,7 +19,7 @@ class ChatOpenAI_LlamaIndex_LLMs implements INode {
    constructor() {
        this.label = 'ChatOpenAI'
        this.name = 'chatOpenAI_LlamaIndex'
-        this.version = 1.0
+        this.version = 2.0
        this.type = 'ChatOpenAI'
        this.icon = 'openai.svg'
        this.category = 'Chat Models'

@@ -35,63 +36,9 @@ class ChatOpenAI_LlamaIndex_LLMs implements INode {
            {
                label: 'Model Name',
                name: 'modelName',
-                type: 'options',
-                options: [
-                    {
-                        label: 'gpt-4',
-                        name: 'gpt-4'
-                    },
-                    {
-                        label: 'gpt-4-turbo-preview',
-                        name: 'gpt-4-turbo-preview'
-                    },
-                    {
-                        label: 'gpt-4-0125-preview',
-                        name: 'gpt-4-0125-preview'
-                    },
-                    {
-                        label: 'gpt-4-1106-preview',
-                        name: 'gpt-4-1106-preview'
-                    },
-                    {
-                        label: 'gpt-4-vision-preview',
-                        name: 'gpt-4-vision-preview'
-                    },
-                    {
-                        label: 'gpt-4-0613',
-                        name: 'gpt-4-0613'
-                    },
-                    {
-                        label: 'gpt-4-32k',
-                        name: 'gpt-4-32k'
-                    },
-                    {
-                        label: 'gpt-4-32k-0613',
-                        name: 'gpt-4-32k-0613'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo',
-                        name: 'gpt-3.5-turbo'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-1106',
-                        name: 'gpt-3.5-turbo-1106'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-0613',
-                        name: 'gpt-3.5-turbo-0613'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-16k',
-                        name: 'gpt-3.5-turbo-16k'
-                    },
-                    {
-                        label: 'gpt-3.5-turbo-16k-0613',
-                        name: 'gpt-3.5-turbo-16k-0613'
-                    }
-                ],
-                default: 'gpt-3.5-turbo',
-                optional: true
+                type: 'asyncOptions',
+                loadMethod: 'listModels',
+                default: 'gpt-3.5-turbo'
            },
            {
                label: 'Temperature',

@@ -124,16 +71,31 @@ class ChatOpenAI_LlamaIndex_LLMs implements INode {
                step: 1,
                optional: true,
                additionalParams: true
-            }
+            },
+            {
+                label: 'BasePath',
+                name: 'basepath',
+                type: 'string',
+                optional: true,
+                additionalParams: true
+            }
        ]
    }

+    //@ts-ignore
+    loadMethods = {
+        async listModels(): Promise<INodeOptionsValue[]> {
+            return await getModels(MODEL_TYPE.CHAT, 'chatOpenAI_LlamaIndex')
+        }
+    }
+
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
        const timeout = nodeData.inputs?.timeout as string
+        const basePath = nodeData.inputs?.basepath as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

@@ -144,11 +106,18 @@ class ChatOpenAI_LlamaIndex_LLMs implements INode {
            apiKey: openAIApiKey
        }

+        if (basePath) {
+            obj.additionalSessionOptions = {
+                baseURL: basePath
+            }
+        }
+
        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (timeout) obj.timeout = parseInt(timeout, 10)
+        const openai = new OpenAISession(obj)

-        const model = new OpenAI(obj)
+        const model = new OpenAI({ ...obj, session: openai })
        return model
    }
}

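The net effect of this hunk: an optional BasePath now flows through additionalSessionOptions into a shared OpenAISession, so every request from the LlamaIndex OpenAI wrapper honors a custom endpoint. Outside Flowise, the same construction looks roughly like the sketch below; the API key and proxy URL are placeholders, not values from this change.

import { OpenAI, OpenAISession } from 'llamaindex'

// Same shape the node builds in init(); key and endpoint are assumed values.
const obj: any = {
    model: 'gpt-3.5-turbo',
    temperature: 0.1,
    apiKey: 'sk-placeholder',
    additionalSessionOptions: { baseURL: 'https://oai-proxy.example.com/v1' }
}
const session = new OpenAISession(obj)
const model = new OpenAI({ ...obj, session })
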
@@ -5,7 +5,7 @@ import { IMultiModalOption, IVisionChatModal } from '../../../src'

export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal {
    configuredModel: string
-    configuredMaxToken: number
+    configuredMaxToken?: number
    multiModalOption: IMultiModalOption
    id: string

@@ -19,8 +19,8 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
    ) {
        super(fields, configuration)
        this.id = id
-        this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo'
-        this.configuredMaxToken = fields?.maxTokens ?? 256
+        this.configuredModel = fields?.modelName ?? ''
+        this.configuredMaxToken = fields?.maxTokens
    }

    revertToOriginalModel(): void {

@@ -33,7 +33,9 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
    }

    setVisionModel(): void {
-        super.modelName = 'gpt-4-vision-preview'
-        super.maxTokens = 1024
+        if (this.modelName !== 'gpt-4-turbo' && !this.modelName.includes('vision')) {
+            super.modelName = 'gpt-4-turbo'
+            super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
+        }
    }
}

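To make the new setVisionModel() guard easier to review, here is the same condition restated as a pure function with example outcomes. This is an illustration only, not code from the diff; resolveVisionModel is a hypothetical name.

function resolveVisionModel(modelName: string, configuredMaxToken?: number) {
    // Only swap models when the configured one cannot handle images.
    if (modelName !== 'gpt-4-turbo' && !modelName.includes('vision')) {
        return { modelName: 'gpt-4-turbo', maxTokens: configuredMaxToken ?? 1024 }
    }
    return { modelName } // already vision-capable: leave the user's choice alone
}

// resolveVisionModel('gpt-3.5-turbo')        -> { modelName: 'gpt-4-turbo', maxTokens: 1024 }
// resolveVisionModel('gpt-4-vision-preview') -> { modelName: 'gpt-4-vision-preview' }
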
@@ -19,7 +19,7 @@ class ChatOpenAICustom_ChatModels implements INode {
    constructor() {
        this.label = 'ChatOpenAI Custom'
        this.name = 'chatOpenAICustom'
-        this.version = 2.0
+        this.version = 3.0
        this.type = 'ChatOpenAI-Custom'
        this.icon = 'openai.svg'
        this.category = 'Chat Models'

@@ -0,0 +1,80 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

class ChatTogetherAI_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams

    constructor() {
        this.label = 'ChatTogetherAI'
        this.name = 'chatTogetherAI'
        this.version = 1.0
        this.type = 'ChatTogetherAI'
        this.icon = 'togetherai.png'
        this.category = 'Chat Models'
        this.description = 'Wrapper around TogetherAI large language models'
        this.baseClasses = [this.type, ...getBaseClasses(ChatTogetherAI)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['togetherAIApi']
        }
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'string',
                placeholder: 'mixtral-8x7b-32768',
                description: 'Refer to <a target="_blank" href="https://docs.together.ai/docs/inference-models">models</a> page'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const modelName = nodeData.inputs?.modelName as string
        const cache = nodeData.inputs?.cache as BaseCache
        const temperature = nodeData.inputs?.temperature as string
        const streaming = nodeData.inputs?.streaming as boolean

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const togetherAIApiKey = getCredentialParam('togetherAIApiKey', credentialData, nodeData)

        const obj: any = {
            model: modelName,
            temperature: parseFloat(temperature),
            togetherAIApiKey: togetherAIApiKey,
            streaming: streaming ?? true
        }
        if (cache) obj.cache = cache

        const model = new ChatTogetherAI(obj)
        return model
    }
}

module.exports = { nodeClass: ChatTogetherAI_ChatModels }

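A standalone sketch of what the node above instantiates, mirroring the fields it passes to ChatTogetherAI. The model id and environment variable are assumptions; see the linked TogetherAI models page for real ids.

import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai'

async function main() {
    const chat = new ChatTogetherAI({
        model: 'mistralai/Mixtral-8x7B-Instruct-v0.1', // assumed TogetherAI model id
        temperature: 0.9,
        togetherAIApiKey: process.env.TOGETHER_AI_API_KEY
    })
    const res = await chat.invoke('What model are you?')
    console.log(res.content)
}
main()
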
@@ -0,0 +1,71 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { TogetherLLM, OpenAI } from 'llamaindex'

class ChatTogetherAI_LlamaIndex_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    tags: string[]
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatTogetherAI'
        this.name = 'chatTogetherAI_LlamaIndex'
        this.version = 1.0
        this.type = 'ChatTogetherAI'
        this.icon = 'togetherai.png'
        this.category = 'Chat Models'
        this.description = 'Wrapper around ChatTogetherAI LLM specific for LlamaIndex'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(TogetherLLM)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['togetherAIApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'string',
                placeholder: 'mixtral-8x7b-32768',
                description: 'Refer to <a target="_blank" href="https://docs.together.ai/docs/inference-models">models</a> page'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const togetherAIApiKey = getCredentialParam('togetherAIApiKey', credentialData, nodeData)

        const obj: Partial<OpenAI> = {
            temperature: parseFloat(temperature),
            model: modelName,
            apiKey: togetherAIApiKey
        }

        const model = new TogetherLLM(obj)
        return model
    }
}

module.exports = { nodeClass: ChatTogetherAI_LlamaIndex_ChatModels }

After Width: | Height: | Size: 1.6 KiB |