Compare commits
743 Commits
flowise@1.
...
main
| Author | SHA1 | Date |
|---|---|---|
|
|
54ff43e8f1 | |
|
|
95b2cf7b7f | |
|
|
074bb738a3 | |
|
|
78e60e22d2 | |
|
|
9e88c45051 | |
|
|
363d1bfc44 | |
|
|
9ea439d135 | |
|
|
1015e1193f | |
|
|
7166317482 | |
|
|
3cbbd59242 | |
|
|
90558ca688 | |
|
|
b1e38783e4 | |
|
|
dfdeb02b3a | |
|
|
cacbfa8162 | |
|
|
656f6cad81 | |
|
|
efc6e02828 | |
|
|
512df4197c | |
|
|
4d174495dc | |
|
|
15a416a58f | |
|
|
e69fee1375 | |
|
|
cc24f94358 | |
|
|
b55f87cc40 | |
|
|
7067f90153 | |
|
|
d0354bb25c | |
|
|
96dfedde6e | |
|
|
1367f095d4 | |
|
|
109b0367cc | |
|
|
e2ae524edd | |
|
|
eff1336b82 | |
|
|
18b83333d3 | |
|
|
0fc5e3d0c5 | |
|
|
aec9e7a3b7 | |
|
|
83ecc88b35 | |
|
|
f811fc4e5d | |
|
|
d4f80394d3 | |
|
|
842bfc66fe | |
|
|
72e5287343 | |
|
|
8bb841641e | |
|
|
b662dd79c6 | |
|
|
1849637af8 | |
|
|
21743656a8 | |
|
|
b5ead0745b | |
|
|
371e632a2c | |
|
|
c34eb8ee15 | |
|
|
f2c6a1988f | |
|
|
76c5e6a893 | |
|
|
3ab0d99711 | |
|
|
5ba468b4cc | |
|
|
5e4d640ed7 | |
|
|
6fb775fe95 | |
|
|
88ee9b09a7 | |
|
|
9f9aff34f8 | |
|
|
66e1296a06 | |
|
|
f1e78d870e | |
|
|
be3a887e68 | |
|
|
c8939dc2a6 | |
|
|
34251fa336 | |
|
|
cb93d9d557 | |
|
|
5899e50c54 | |
|
|
e0a03ad46d | |
|
|
582dcc8508 | |
|
|
5a73eaa588 | |
|
|
4ec8376efa | |
|
|
5ba9493b30 | |
|
|
bdbb6f850a | |
|
|
55f52c4d50 | |
|
|
7eb9341fdc | |
|
|
a799ac8087 | |
|
|
76abd20e85 | |
|
|
272fd914bd | |
|
|
f2a0ffe542 | |
|
|
8c66d2c735 | |
|
|
e15e6fafdc | |
|
|
e5f0ca2dd3 | |
|
|
1d9927027d | |
|
|
c42ef95a15 | |
|
|
f64931bfcc | |
|
|
6a58ae4e80 | |
|
|
04e0ce1783 | |
|
|
b5b929e192 | |
|
|
7706b3484a | |
|
|
d50563765e | |
|
|
eb738a1552 | |
|
|
059eae4268 | |
|
|
d734747ec0 | |
|
|
912c8f3d5b | |
|
|
48ac815f8e | |
|
|
2878af69e4 | |
|
|
5d649b27cf | |
|
|
f5b08864b8 | |
|
|
97386bc3b2 | |
|
|
22f39692e5 | |
|
|
82899d9d5d | |
|
|
50c53de296 | |
|
|
e32b643445 | |
|
|
265de4e97e | |
|
|
ff2381741e | |
|
|
e83dcb01b8 | |
|
|
9d10dc4856 | |
|
|
68625c0589 | |
|
|
8ebc4dcfd5 | |
|
|
95f1090bed | |
|
|
5733a8089e | |
|
|
8caca472ba | |
|
|
816436f8fa | |
|
|
0521e6b3f9 | |
|
|
0365afbeeb | |
|
|
0de7fb8509 | |
|
|
b5e502f3b6 | |
|
|
c022972cf8 | |
|
|
b65487564a | |
|
|
49c07552ce | |
|
|
b4829275aa | |
|
|
b3069932e1 | |
|
|
b50103021c | |
|
|
4fbc3f6cfe | |
|
|
d3f03e380e | |
|
|
823cefb5c5 | |
|
|
ee9d3a33fa | |
|
|
cb0eb67df0 | |
|
|
32ad3b1366 | |
|
|
b952350a7b | |
|
|
38ce851200 | |
|
|
1ee6f1f88a | |
|
|
dce84106ef | |
|
|
e851af90b1 | |
|
|
96d4ab66f2 | |
|
|
2048976545 | |
|
|
a9f9c8874c | |
|
|
26e7a1ac35 | |
|
|
43b22476e3 | |
|
|
d4a5474f48 | |
|
|
ef532866fd | |
|
|
a84eabbef2 | |
|
|
d34cef2dc7 | |
|
|
40718bd77a | |
|
|
a6bcaba592 | |
|
|
80f24ac30c | |
|
|
40e36d1b39 | |
|
|
af4e28aa91 | |
|
|
713a1e815d | |
|
|
09569d0b06 | |
|
|
f9195b6a68 | |
|
|
5a137a478c | |
|
|
9971627821 | |
|
|
2254d16c3a | |
|
|
c5e06bce6d | |
|
|
d5a97060e2 | |
|
|
e71266de87 | |
|
|
db452cd74d | |
|
|
adea2f0830 | |
|
|
2b1273ca31 | |
|
|
c4eb75ddde | |
|
|
5775947586 | |
|
|
b7eb876b39 | |
|
|
198fffe331 | |
|
|
a295573f82 | |
|
|
51058b2a31 | |
|
|
fa3d21bc30 | |
|
|
723837b30f | |
|
|
6899b27229 | |
|
|
546eafe6a1 | |
|
|
7360d1d9a6 | |
|
|
4782c0f6fc | |
|
|
f378dcc332 | |
|
|
8d549f87b5 | |
|
|
728af22cc4 | |
|
|
7006d64de0 | |
|
|
6ab259b6aa | |
|
|
4c2ba109fd | |
|
|
95beaba9d9 | |
|
|
f5be889ea8 | |
|
|
f4c7887e50 | |
|
|
b7e4fc9517 | |
|
|
6bd8aaefc8 | |
|
|
d1c8f7eb96 | |
|
|
e4ab2a9e33 | |
|
|
713077381b | |
|
|
2cadd68a43 | |
|
|
1ccd3c7170 | |
|
|
d07bd96c7b | |
|
|
5b0941e7d3 | |
|
|
fec087c54d | |
|
|
a71785f0e2 | |
|
|
b34094035d | |
|
|
1130620d40 | |
|
|
0b3da598dd | |
|
|
788d40f26b | |
|
|
4daf29db80 | |
|
|
a82dd93c6c | |
|
|
e7a58fc700 | |
|
|
c33642cdf9 | |
|
|
57b716c7d7 | |
|
|
d7194e8aaa | |
|
|
024b2ad22e | |
|
|
d96459d87b | |
|
|
1996cc40ba | |
|
|
e630123f63 | |
|
|
eabc84ee9f | |
|
|
0511ea1f56 | |
|
|
20a500efb5 | |
|
|
1129782758 | |
|
|
057e056257 | |
|
|
827de07e94 | |
|
|
19bb23440a | |
|
|
19e14c4798 | |
|
|
658fa3984e | |
|
|
eed7de6df5 | |
|
|
39198a42ad | |
|
|
39f7e4c263 | |
|
|
87e30399d4 | |
|
|
e422ce287b | |
|
|
957694a912 | |
|
|
ea255db15d | |
|
|
b9b0c9d227 | |
|
|
dad30472b6 | |
|
|
70706a7183 | |
|
|
a09b7f7e39 | |
|
|
aa6aa2e461 | |
|
|
a57cd76757 | |
|
|
f116dba84e | |
|
|
794818b434 | |
|
|
6fd0fe60fc | |
|
|
b177644354 | |
|
|
a702e7408c | |
|
|
3a7c2fd4db | |
|
|
414b9f125c | |
|
|
4ca82ee733 | |
|
|
7e84049990 | |
|
|
509b2dd36d | |
|
|
813f622f6d | |
|
|
0267005225 | |
|
|
dee681b63e | |
|
|
0c0308b9c4 | |
|
|
57efa25fe5 | |
|
|
ff09ae6a50 | |
|
|
bf5be755f6 | |
|
|
c11c43cf0d | |
|
|
ec1bbc84bc | |
|
|
ac02cde2fa | |
|
|
187d306653 | |
|
|
0ba6548163 | |
|
|
50a2e911f2 | |
|
|
bb7373ee62 | |
|
|
cd4c659009 | |
|
|
58122e985c | |
|
|
0c649c9ce3 | |
|
|
01559d4da3 | |
|
|
3fbfd3d425 | |
|
|
536da36d48 | |
|
|
39e380eac2 | |
|
|
d437a6fb11 | |
|
|
3ff5a5ee05 | |
|
|
5d14d0af1b | |
|
|
134ecb8b2a | |
|
|
fa081acea0 | |
|
|
49846cd66a | |
|
|
1459190adc | |
|
|
0ebfa68b93 | |
|
|
b20a46a03d | |
|
|
bc32759d96 | |
|
|
f98509226d | |
|
|
534d6e4bbf | |
|
|
5f69a0652e | |
|
|
b98c15d832 | |
|
|
7c4056e305 | |
|
|
ad3d5032a5 | |
|
|
02963ce0d5 | |
|
|
c4cc13c9c7 | |
|
|
56b610cfa2 | |
|
|
eb2a83fda7 | |
|
|
381a4553bd | |
|
|
a4a2fbb08f | |
|
|
4a6e71058c | |
|
|
e95a780b26 | |
|
|
c2ecb48900 | |
|
|
3096a0fa50 | |
|
|
0726fc3751 | |
|
|
2853b3149b | |
|
|
4b01e66814 | |
|
|
25000305ee | |
|
|
c557d8d19c | |
|
|
474681e113 | |
|
|
f16254203e | |
|
|
b6e0b08f92 | |
|
|
dc59b0468f | |
|
|
4a7da99996 | |
|
|
63d9c94048 | |
|
|
6db48a97ed | |
|
|
e1e5ff66f4 | |
|
|
7313891ea1 | |
|
|
188e2ad35a | |
|
|
e58cf02227 | |
|
|
99eaa22af5 | |
|
|
ae78ea6b43 | |
|
|
6d4fa7b368 | |
|
|
7020974b55 | |
|
|
b7bb043d3a | |
|
|
150da68dc8 | |
|
|
2a30dbfd4c | |
|
|
ed6b3b2a6d | |
|
|
131eccef45 | |
|
|
b625104433 | |
|
|
305a585cf4 | |
|
|
90d40e115f | |
|
|
13ee0d0c7b | |
|
|
ac35d5f667 | |
|
|
5f7efd3b83 | |
|
|
6083dfa9cc | |
|
|
fd5a34b8ea | |
|
|
07b4077284 | |
|
|
5a8848daa2 | |
|
|
33eb72f58a | |
|
|
5b94974d80 | |
|
|
2d208e1f99 | |
|
|
99e1d043a4 | |
|
|
8a0af7b446 | |
|
|
88dd9cc6b4 | |
|
|
20929dbff1 | |
|
|
c9079539ef | |
|
|
797ee30c75 | |
|
|
69e082e29f | |
|
|
e04cfba7a5 | |
|
|
d98ac8236a | |
|
|
4b9a7c9b9b | |
|
|
66a83f886a | |
|
|
c35eb0b7e5 | |
|
|
bce7ff9ada | |
|
|
6eab5cf681 | |
|
|
3fda7973bb | |
|
|
2b0ca60686 | |
|
|
8c1a68a37e | |
|
|
7ab96a4c39 | |
|
|
63b8c23072 | |
|
|
a2caf3e265 | |
|
|
c16e0ae9d2 | |
|
|
3b84e718a2 | |
|
|
07503f9be8 | |
|
|
91765c2e7b | |
|
|
de2ac1d13c | |
|
|
214e7c8dc7 | |
|
|
fdc0baab68 | |
|
|
b804ceaa47 | |
|
|
6331a351c6 | |
|
|
e3b5309855 | |
|
|
da43fb7f44 | |
|
|
9dfa259ea5 | |
|
|
b643afae3d | |
|
|
e699bbf2a2 | |
|
|
2df05b3252 | |
|
|
d706ca389f | |
|
|
811a6a0f41 | |
|
|
00bc63296b | |
|
|
afff39e334 | |
|
|
54c59024c5 | |
|
|
1721d45957 | |
|
|
8c65900bd2 | |
|
|
02735a1f1e | |
|
|
a59d0dc769 | |
|
|
52dbfc6a4f | |
|
|
3834fe7fea | |
|
|
c5e0bb94ae | |
|
|
55b06f0581 | |
|
|
893100bc8a | |
|
|
d0b20386e5 | |
|
|
a412bcc84e | |
|
|
79812a23e4 | |
|
|
3c0b543407 | |
|
|
1d0230382b | |
|
|
90dad9aa0a | |
|
|
9c10822546 | |
|
|
43c1d5d017 | |
|
|
f0d129d22d | |
|
|
0b6e576fa0 | |
|
|
d0ddf018c7 | |
|
|
d5b8dcd5b5 | |
|
|
ceebd3e11a | |
|
|
e4a9ee730a | |
|
|
8d88608a68 | |
|
|
3046ccc6a7 | |
|
|
c4b9de2ddc | |
|
|
0244216720 | |
|
|
78e53ed976 | |
|
|
c684cec596 | |
|
|
c76fe7ebee | |
|
|
f477c74e0e | |
|
|
5698a62618 | |
|
|
bdb93f755d | |
|
|
26c52398a0 | |
|
|
256e325086 | |
|
|
930bdd5c51 | |
|
|
7d76c127f1 | |
|
|
c07e908fef | |
|
|
07ce50cbab | |
|
|
c123b5f83a | |
|
|
2d9bf585f3 | |
|
|
ce16fd94f9 | |
|
|
a134ea85eb | |
|
|
714f82a234 | |
|
|
92bdf1cc51 | |
|
|
b7d01f10a4 | |
|
|
6a6cfb61fc | |
|
|
877c283d07 | |
|
|
c5c396a0ed | |
|
|
a555ac2b46 | |
|
|
89307b13c9 | |
|
|
850e50689e | |
|
|
68ac61c95f | |
|
|
bca7e82bf8 | |
|
|
f1c704cfd1 | |
|
|
735425e902 | |
|
|
0521d26c60 | |
|
|
6819d5f66b | |
|
|
2290ba9cc0 | |
|
|
b884e93ba2 | |
|
|
6247b678a8 | |
|
|
46c32693ad | |
|
|
e55975ec7f | |
|
|
7e84268f0d | |
|
|
d8af5effe9 | |
|
|
39f6991b0f | |
|
|
ada19b68a2 | |
|
|
f690943316 | |
|
|
0a5195d1ab | |
|
|
7728178253 | |
|
|
2f4134a291 | |
|
|
68d56be7c0 | |
|
|
e86550a91a | |
|
|
23f7e7802f | |
|
|
35d3b933ed | |
|
|
4071fe58be | |
|
|
a48edcd3a8 | |
|
|
d17280255b | |
|
|
02b72f917a | |
|
|
9ce734c539 | |
|
|
c31b1c356f | |
|
|
b12d0d6996 | |
|
|
0432dc9005 | |
|
|
18b6c0f127 | |
|
|
73f1132544 | |
|
|
4cee518cbf | |
|
|
0bc85593d5 | |
|
|
51c2a93a02 | |
|
|
97a376d6e2 | |
|
|
fbf68309dc | |
|
|
dd0862b6e1 | |
|
|
31c89aa8e6 | |
|
|
15afb8a2dd | |
|
|
78677d9ee5 | |
|
|
3bb2b39896 | |
|
|
d1fdd8b3bd | |
|
|
f9f26204be | |
|
|
38fedc2795 | |
|
|
efe6097ceb | |
|
|
aec0645fba | |
|
|
9da7a1b4de | |
|
|
719efe34c1 | |
|
|
29c86f77bf | |
|
|
76ac97fa50 | |
|
|
b31e8715f4 | |
|
|
9cb901c817 | |
|
|
6c1f0f7330 | |
|
|
a323c65ec4 | |
|
|
8bad360796 | |
|
|
d313dc6754 | |
|
|
46c47017bd | |
|
|
13c97d2f59 | |
|
|
4773a13042 | |
|
|
5a45a99620 | |
|
|
5aa991ae56 | |
|
|
81c07dc8c1 | |
|
|
f0057739c9 | |
|
|
dd64bd028b | |
|
|
a035940d1d | |
|
|
10fc1bf08d | |
|
|
342adff8cb | |
|
|
d67c265c0a | |
|
|
52ffa1772b | |
|
|
e4cc333374 | |
|
|
9c874bb49a | |
|
|
26d5d6d6a2 | |
|
|
6acc921095 | |
|
|
85809a9ecc | |
|
|
a71c5a109d | |
|
|
44c1f54d05 | |
|
|
ef6edd398b | |
|
|
86da67f467 | |
|
|
778e024c02 | |
|
|
dcb1ad15e7 | |
|
|
56b21862a3 | |
|
|
205670375d | |
|
|
e86c15ec93 | |
|
|
4d7c7d6ef5 | |
|
|
6de1e8acec | |
|
|
11219c6549 | |
|
|
430dcedc4d | |
|
|
e2df5e9e01 | |
|
|
a6abd593a6 | |
|
|
55c2a8612b | |
|
|
0a54db71c1 | |
|
|
9072e694ca | |
|
|
4e8bf4903d | |
|
|
d5d690b3c4 | |
|
|
0ddae82fd0 | |
|
|
5471a4c9aa | |
|
|
28b33f2c6b | |
|
|
a132f51727 | |
|
|
2ef0f813c6 | |
|
|
55dad31438 | |
|
|
702b8c1aab | |
|
|
e3c899230c | |
|
|
caf54bf31b | |
|
|
0154de4f93 | |
|
|
cf79176ca6 | |
|
|
1f2e58c5d5 | |
|
|
907d5c7ef7 | |
|
|
4a75396325 | |
|
|
9af4eaaa8c | |
|
|
e110a49a32 | |
|
|
dd89af8a09 | |
|
|
64018b72e8 | |
|
|
f7588007b6 | |
|
|
d0b1980482 | |
|
|
d7f9c07381 | |
|
|
bc054d2fe1 | |
|
|
3f0f0e4d28 | |
|
|
08c07802f5 | |
|
|
ab86294d6b | |
|
|
4be28c4050 | |
|
|
ee2bd518c3 | |
|
|
17a27d92a5 | |
|
|
73112ad122 | |
|
|
188e2c748c | |
|
|
f14039736d | |
|
|
3031942173 | |
|
|
19fb13baf0 | |
|
|
2bb2a7588a | |
|
|
90e6a804e4 | |
|
|
336b4174dc | |
|
|
7d00f6fbe9 | |
|
|
6c80a14e33 | |
|
|
c2ae7e138c | |
|
|
8c494cf17e | |
|
|
7486d33237 | |
|
|
8e80b582bb | |
|
|
842d70bf0d | |
|
|
8990b78e10 | |
|
|
7faaf13ccc | |
|
|
9189b70131 | |
|
|
dcacb02a47 | |
|
|
18e919efb4 | |
|
|
011a0a75c3 | |
|
|
cc6a1f04c6 | |
|
|
5543ef3de4 | |
|
|
5da3e3cc3e | |
|
|
113415e2c9 | |
|
|
62f70ab6ef | |
|
|
95b251f02a | |
|
|
288e451161 | |
|
|
6013743705 | |
|
|
c504f91752 | |
|
|
041bfea940 | |
|
|
340e85da91 | |
|
|
a4131dc21b | |
|
|
c5bd4d4168 | |
|
|
a219efc913 | |
|
|
9cd0362f24 | |
|
|
37828de664 | |
|
|
e684f9a227 | |
|
|
1522acbf5a | |
|
|
7881f295ab | |
|
|
48677c33cc | |
|
|
7e475f1288 | |
|
|
5c6b5b233c | |
|
|
8d62adec2f | |
|
|
eab8c19f8c | |
|
|
aa5d1417a1 | |
|
|
5c8f48c2f1 | |
|
|
c98ef7a8b1 | |
|
|
68bc3c708f | |
|
|
9ab8c36fd0 | |
|
|
e81927ee13 | |
|
|
4604594c55 | |
|
|
02fe500f21 | |
|
|
4107118673 | |
|
|
2ac9e3f7bf | |
|
|
f7d722089b | |
|
|
a382e230f4 | |
|
|
82e78d3e4d | |
|
|
214b312fe5 | |
|
|
2b67346ce0 | |
|
|
436b3aae75 | |
|
|
4d6881b506 | |
|
|
1d122084b9 | |
|
|
517c2f2916 | |
|
|
289b04fb12 | |
|
|
1b69ebdb93 | |
|
|
18c9c1cc51 | |
|
|
f460b761fb | |
|
|
985e454a7f | |
|
|
ca0af72325 | |
|
|
071232091f | |
|
|
63665b37ce | |
|
|
9f4619e408 | |
|
|
905c9fc2be | |
|
|
b960f061eb | |
|
|
66eef84633 | |
|
|
37945fc998 | |
|
|
dc39d7e2be | |
|
|
9b71f683ff | |
|
|
2237b1ab16 | |
|
|
3b788e42e1 | |
|
|
456dfabc6f | |
|
|
8ae848110e | |
|
|
72ec7878b6 | |
|
|
1a7cb5a010 | |
|
|
ae64854bae | |
|
|
71f456af90 | |
|
|
6f7b7408e1 | |
|
|
21c47d8049 | |
|
|
e72d9e4094 | |
|
|
30ff29afc7 | |
|
|
74f7cd6e31 | |
|
|
3e59b9b658 | |
|
|
aeb036bb47 | |
|
|
0f09782874 | |
|
|
393f9b57c6 | |
|
|
1c108f3599 | |
|
|
51388d5057 | |
|
|
36ab1681ac | |
|
|
e154461f1d | |
|
|
e8deeb25cf | |
|
|
b636b4384c | |
|
|
d61e3d53ec | |
|
|
b382dd4c43 | |
|
|
7b0416b680 | |
|
|
29d840c09e | |
|
|
a711e21ac7 | |
|
|
c1767f302d | |
|
|
0606d2c6dd | |
|
|
ee058139ed | |
|
|
b050920a77 | |
|
|
20b2cd2933 | |
|
|
d6030f8e9c | |
|
|
601a4d6b66 | |
|
|
50cef64193 | |
|
|
3d670fec81 | |
|
|
5b126c60bc | |
|
|
09d2b96231 | |
|
|
94d8e003e7 | |
|
|
98acb35376 | |
|
|
1fb3e25f53 | |
|
|
089928aaa8 | |
|
|
657dace89e | |
|
|
274125a289 | |
|
|
2d6dcb3e82 | |
|
|
eb4d54552c | |
|
|
049a35968a | |
|
|
3be2393412 | |
|
|
1c6694b197 | |
|
|
4ce0ee2600 | |
|
|
dc19fde063 | |
|
|
8ca8e0ede0 | |
|
|
3abfa13587 | |
|
|
2279ffd57d | |
|
|
3ce22d0dde | |
|
|
8189f945cc | |
|
|
6395b121b4 | |
|
|
318686e622 | |
|
|
dd32a31a3e | |
|
|
f384ad9086 | |
|
|
7d0ae5286c | |
|
|
59643b65d9 | |
|
|
7e5d8e7294 | |
|
|
193e5c4640 | |
|
|
c24708f53b | |
|
|
bf60a1a2a9 | |
|
|
62ec17d684 | |
|
|
76cb8794bf | |
|
|
b76c3b27a9 | |
|
|
e774bd3c12 | |
|
|
bfa26a72c4 | |
|
|
43fa1166df | |
|
|
9637c12297 | |
|
|
1b8813a8b9 | |
|
|
e7edbc695c | |
|
|
f87d84997c | |
|
|
9222aafc6f | |
|
|
188311187a | |
|
|
8a14a52d90 | |
|
|
398a31f426 | |
|
|
f57daea946 | |
|
|
029d5a9d6e | |
|
|
c15489cbae | |
|
|
7f15494d60 | |
|
|
4d92989977 | |
|
|
27f14ce402 | |
|
|
e07f27c528 | |
|
|
3d2b4077cf | |
|
|
e3982476b0 | |
|
|
51a9808012 | |
|
|
e8c85035f2 | |
|
|
28bfd4128a | |
|
|
66701cec8a | |
|
|
e88859f5d4 | |
|
|
beefcf1f6e | |
|
|
6006157958 | |
|
|
3fb8001907 | |
|
|
d3ce6f859c | |
|
|
c6ae3be257 | |
|
|
60800db347 | |
|
|
826de70c6c | |
|
|
c609c63f44 | |
|
|
1bd1fd5828 | |
|
|
1b308a8b54 | |
|
|
32575828cd | |
|
|
68fbe0ea12 | |
|
|
b492153f8a | |
|
|
dc265eb472 | |
|
|
73f7046316 | |
|
|
57f7bba585 | |
|
|
53c330a53b | |
|
|
8a3320aab9 | |
|
|
0ad9b9ae3d | |
|
|
5bc5378333 | |
|
|
75a2915388 | |
|
|
40f8371de9 | |
|
|
4e3f219e2d | |
|
|
8ce32179ee | |
|
|
c96572e10f | |
|
|
2356cc7abd | |
|
|
98bdda52d7 | |
|
|
ee39ce26c1 | |
|
|
4390734854 | |
|
|
c07c86e71b | |
|
|
f0959fe3bf | |
|
|
2c379b299a | |
|
|
3139152cb1 | |
|
|
4061dd0abb | |
|
|
35ad74f8a3 | |
|
|
e2b63e06d6 | |
|
|
ea8e1c8628 | |
|
|
0f09faa1bc |
|
|
@ -5,3 +5,6 @@ build
|
|||
**/node_modules
|
||||
**/build
|
||||
**/dist
|
||||
|
||||
packages/server/.env
|
||||
packages/ui/.env
|
||||
|
|
|
|||
|
|
@ -27,7 +27,8 @@ If applicable, add screenshots to help explain your problem.
|
|||
If applicable, add exported flow in order to help replicating the problem.
|
||||
|
||||
**Setup**
|
||||
- Installation [e.g. docker, `npx flowise start`, `yarn start`]
|
||||
|
||||
- Installation [e.g. docker, `npx flowise start`, `pnpm start`]
|
||||
- Flowise Version [e.g. 1.2.11]
|
||||
- OS: [e.g. macOS, Windows, Linux]
|
||||
- Browser [e.g. chrome, safari]
|
||||
|
|
|
|||
|
|
@ -1,33 +1,33 @@
|
|||
name: autoSyncMergedPullRequest
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- closed
|
||||
branches: [ "main" ]
|
||||
pull_request_target:
|
||||
types:
|
||||
- closed
|
||||
branches: ['main']
|
||||
jobs:
|
||||
autoSyncMergedPullRequest:
|
||||
if: github.event.pull_request.merged == true
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Show PR info
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
echo The PR #${{ github.event.pull_request.number }} was merged on main branch!
|
||||
- name: Repository Dispatch
|
||||
uses: peter-evans/repository-dispatch@v2
|
||||
with:
|
||||
token: ${{ secrets.AUTOSYNC_TOKEN }}
|
||||
repository: ${{ secrets.AUTOSYNC_CH_URL }}
|
||||
event-type: ${{ secrets.AUTOSYNC_PR_EVENT_TYPE }}
|
||||
client-payload: >-
|
||||
{
|
||||
"ref": "${{ github.ref }}",
|
||||
"prNumber": "${{ github.event.pull_request.number }}",
|
||||
"prTitle": "${{ github.event.pull_request.title }}",
|
||||
"prDescription": "${{ github.event.pull_request.description }}",
|
||||
"sha": "${{ github.sha }}"
|
||||
}
|
||||
autoSyncMergedPullRequest:
|
||||
if: github.event.pull_request.merged == true
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Show PR info
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
echo The PR #${{ github.event.pull_request.number }} was merged on main branch!
|
||||
- name: Repository Dispatch
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.AUTOSYNC_TOKEN }}
|
||||
repository: ${{ secrets.AUTOSYNC_CH_URL }}
|
||||
event-type: ${{ secrets.AUTOSYNC_PR_EVENT_TYPE }}
|
||||
client-payload: >-
|
||||
{
|
||||
"ref": "${{ github.ref }}",
|
||||
"prNumber": "${{ github.event.pull_request.number }}",
|
||||
"prTitle": "${{ github.event.pull_request.title }}",
|
||||
"prDescription": "",
|
||||
"sha": "${{ github.sha }}"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,36 +1,36 @@
|
|||
name: autoSyncSingleCommit
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
jobs:
|
||||
doNotAutoSyncSingleCommit:
|
||||
if: github.event.commits[1] != null
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: IGNORE autoSyncSingleCommit
|
||||
run: |
|
||||
echo This single commit has came from a merged commit. We will ignore it. This case is handled in autoSyncMergedPullRequest workflow for merge commits comming from merged pull requests only! Beware, the regular merge commits are not handled by any workflow for the moment.
|
||||
autoSyncSingleCommit:
|
||||
if: github.event.commits[1] == null
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: autoSyncSingleCommit
|
||||
env:
|
||||
GITHUB_CONTEXT: ${{ toJSON(github) }}
|
||||
run: |
|
||||
echo Autosync a single commit with id: ${{ github.sha }} from openSource main branch towards cloud hosted version.
|
||||
- name: Repository Dispatch
|
||||
uses: peter-evans/repository-dispatch@v2
|
||||
with:
|
||||
token: ${{ secrets.AUTOSYNC_TOKEN }}
|
||||
repository: ${{ secrets.AUTOSYNC_CH_URL }}
|
||||
event-type: ${{ secrets.AUTOSYNC_SC_EVENT_TYPE }}
|
||||
client-payload: >-
|
||||
{
|
||||
"ref": "${{ github.ref }}",
|
||||
"sha": "${{ github.sha }}",
|
||||
"commitMessage": "${{ github.event.commits[0].message }}"
|
||||
}
|
||||
doNotAutoSyncSingleCommit:
|
||||
if: github.event.commits[1] != null
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: IGNORE autoSyncSingleCommit
|
||||
run: |
|
||||
echo This single commit has came from a merged commit. We will ignore it. This case is handled in autoSyncMergedPullRequest workflow for merge commits comming from merged pull requests only! Beware, the regular merge commits are not handled by any workflow for the moment.
|
||||
autoSyncSingleCommit:
|
||||
if: github.event.commits[1] == null
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: autoSyncSingleCommit
|
||||
env:
|
||||
GITHUB_CONTEXT: ${{ toJSON(github) }}
|
||||
run: |
|
||||
echo Autosync a single commit with id: ${{ github.sha }} from openSource main branch towards cloud hosted version.
|
||||
- name: Repository Dispatch
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.AUTOSYNC_TOKEN }}
|
||||
repository: ${{ secrets.AUTOSYNC_CH_URL }}
|
||||
event-type: ${{ secrets.AUTOSYNC_SC_EVENT_TYPE }}
|
||||
client-payload: >-
|
||||
{
|
||||
"ref": "${{ github.ref }}",
|
||||
"sha": "${{ github.sha }}",
|
||||
"commitMessage": "${{ github.event.commits[0].id }}"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,43 @@
|
|||
name: Docker Image CI
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
node_version:
|
||||
description: 'Node.js version to build this image with.'
|
||||
type: choice
|
||||
required: true
|
||||
default: '20'
|
||||
options:
|
||||
- '20'
|
||||
tag_version:
|
||||
description: 'Tag version of the image to be pushed.'
|
||||
type: string
|
||||
required: true
|
||||
default: 'latest'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4.1.1
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3.0.0
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.0.0
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v5.3.0
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/Dockerfile
|
||||
build-args: |
|
||||
NODE_VERSION=${{github.event.inputs.node_version}}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: flowiseai/flowise:${{github.event.inputs.tag_version}}
|
||||
|
|
@ -1,17 +1,13 @@
|
|||
name: Node CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
pull_request:
|
||||
branches:
|
||||
- '*'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
strategy:
|
||||
|
|
@ -22,16 +18,32 @@ jobs:
|
|||
env:
|
||||
PUPPETEER_SKIP_DOWNLOAD: true
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- uses: pnpm/action-setup@v3
|
||||
with:
|
||||
version: 9.0.4
|
||||
- name: Use Node.js ${{ matrix.node-version }}
|
||||
uses: actions/setup-node@v3
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
|
||||
- run: npm i -g yarn
|
||||
|
||||
- run: yarn install --ignore-engines
|
||||
|
||||
- run: yarn lint
|
||||
|
||||
- run: yarn build
|
||||
check-latest: false
|
||||
cache: 'pnpm'
|
||||
- run: npm i -g pnpm
|
||||
- run: pnpm install
|
||||
- run: ./node_modules/.bin/cypress install
|
||||
- run: pnpm lint
|
||||
- run: pnpm build
|
||||
- name: Install dependencies
|
||||
uses: cypress-io/github-action@v6
|
||||
with:
|
||||
working-directory: ./
|
||||
runTests: false
|
||||
- name: Cypress test
|
||||
uses: cypress-io/github-action@v6
|
||||
with:
|
||||
install: false
|
||||
working-directory: packages/server
|
||||
start: pnpm start
|
||||
wait-on: 'http://localhost:3000'
|
||||
wait-on-timeout: 120
|
||||
browser: chrome
|
||||
|
|
|
|||
|
|
@ -11,6 +11,9 @@
|
|||
**/logs
|
||||
**/*.log
|
||||
|
||||
## pnpm
|
||||
.pnpm-store/
|
||||
|
||||
## build
|
||||
**/dist
|
||||
**/build
|
||||
|
|
@ -44,3 +47,60 @@
|
|||
|
||||
## compressed
|
||||
**/*.tgz
|
||||
|
||||
## vscode
|
||||
.vscode/*
|
||||
!.vscode/settings.json
|
||||
!.vscode/tasks.json
|
||||
!.vscode/launch.json
|
||||
!.vscode/extensions.json
|
||||
!.vscode/*.code-snippets
|
||||
|
||||
# Local History for Visual Studio Code
|
||||
.history/
|
||||
|
||||
## other keys
|
||||
*.key
|
||||
*.keys
|
||||
*.priv
|
||||
*.rsa
|
||||
*.key.json
|
||||
|
||||
## ssh keys
|
||||
*.ssh
|
||||
*.ssh-key
|
||||
.key-mrc
|
||||
|
||||
## Certificate Authority
|
||||
*.ca
|
||||
|
||||
## Certificate
|
||||
*.crt
|
||||
|
||||
## Certificate Sign Request
|
||||
*.csr
|
||||
|
||||
## Certificate
|
||||
*.der
|
||||
|
||||
## Key database file
|
||||
*.kdb
|
||||
|
||||
## OSCP request data
|
||||
*.org
|
||||
|
||||
## PKCS #12
|
||||
*.p12
|
||||
|
||||
## PEM-encoded certificate data
|
||||
*.pem
|
||||
|
||||
## Random number seed
|
||||
*.rnd
|
||||
|
||||
## SSLeay data
|
||||
*.ssleay
|
||||
|
||||
## S/MIME message
|
||||
*.smime
|
||||
*.vsix
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
#!/bin/sh
|
||||
. "$(dirname "$0")/_/husky.sh"
|
||||
|
||||
yarn quick # prettify
|
||||
yarn lint-staged # eslint lint(also include prettify but prettify support more file extensions than eslint, so run prettify first)
|
||||
pnpm quick # prettify
|
||||
pnpm lint-staged # eslint lint(also include prettify but prettify support more file extensions than eslint, so run prettify first)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
auto-install-peers = true
|
||||
strict-peer-dependencies = false
|
||||
prefer-workspace-packages = true
|
||||
link-workspace-packages = deep
|
||||
hoist = true
|
||||
shamefully-hoist = true
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
**/node_modules
|
||||
**/dist
|
||||
**/build
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
module.exports = {
|
||||
printWidth: 140,
|
||||
singleQuote: true,
|
||||
jsxSingleQuote: true,
|
||||
trailingComma: 'none',
|
||||
tabWidth: 4,
|
||||
semi: false,
|
||||
endOfLine: 'auto'
|
||||
}
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
# Contributor Covenant Code of Conduct
|
||||
|
||||
English | [中文](<./CODE_OF_CONDUCT-ZH.md>)
|
||||
English | [中文](./i18n/CODE_OF_CONDUCT-ZH.md)
|
||||
|
||||
## Our Pledge
|
||||
|
||||
|
|
|
|||
|
|
@ -1,157 +0,0 @@
|
|||
<!-- markdownlint-disable MD030 -->
|
||||
|
||||
# 贡献给 Flowise
|
||||
|
||||
[English](./CONTRIBUTING.md) | 中文
|
||||
|
||||
我们欢迎任何形式的贡献。
|
||||
|
||||
## ⭐ 点赞
|
||||
|
||||
点赞并分享[Github 仓库](https://github.com/FlowiseAI/Flowise)。
|
||||
|
||||
## 🙋 问题和回答
|
||||
|
||||
在[问题和回答](https://github.com/FlowiseAI/Flowise/discussions/categories/q-a)部分搜索任何问题,如果找不到,可以毫不犹豫地创建一个。这可能会帮助到其他有类似问题的人。
|
||||
|
||||
## 🙌 分享 Chatflow
|
||||
|
||||
是的!分享你如何使用 Flowise 是一种贡献方式。将你的 Chatflow 导出为 JSON,附上截图并在[展示和分享](https://github.com/FlowiseAI/Flowise/discussions/categories/show-and-tell)部分分享。
|
||||
|
||||
## 💡 想法
|
||||
|
||||
欢迎各种想法,如新功能、应用集成和区块链网络。在[想法](https://github.com/FlowiseAI/Flowise/discussions/categories/ideas)部分提交。
|
||||
|
||||
## 🐞 报告错误
|
||||
|
||||
发现问题了吗?[报告它](https://github.com/FlowiseAI/Flowise/issues/new/choose)。
|
||||
|
||||
## 👨💻 贡献代码
|
||||
|
||||
不确定要贡献什么?一些想法:
|
||||
|
||||
- 从 Langchain 创建新组件
|
||||
- 更新现有组件,如扩展功能、修复错误
|
||||
- 添加新的 Chatflow 想法
|
||||
|
||||
### 开发人员
|
||||
|
||||
Flowise 在一个单一的单体存储库中有 3 个不同的模块。
|
||||
|
||||
- `server`:用于提供 API 逻辑的 Node 后端
|
||||
- `ui`:React 前端
|
||||
- `components`:Langchain 组件
|
||||
|
||||
#### 先决条件
|
||||
|
||||
- 安装 [Yarn v1](https://classic.yarnpkg.com/en/docs/install)
|
||||
```bash
|
||||
npm i -g yarn
|
||||
```
|
||||
|
||||
#### 逐步指南
|
||||
|
||||
1. Fork 官方的[Flowise Github 仓库](https://github.com/FlowiseAI/Flowise)。
|
||||
|
||||
2. 克隆你 fork 的存储库。
|
||||
|
||||
3. 创建一个新的分支,参考[指南](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository)。命名约定:
|
||||
|
||||
- 对于功能分支:`feature/<你的新功能>`
|
||||
- 对于 bug 修复分支:`bugfix/<你的新bug修复>`。
|
||||
|
||||
4. 切换到新创建的分支。
|
||||
|
||||
5. 进入存储库文件夹
|
||||
|
||||
```bash
|
||||
cd Flowise
|
||||
```
|
||||
|
||||
6. 安装所有模块的依赖项:
|
||||
|
||||
```bash
|
||||
yarn install
|
||||
```
|
||||
|
||||
7. 构建所有代码:
|
||||
|
||||
```bash
|
||||
yarn build
|
||||
```
|
||||
|
||||
8. 在[http://localhost:3000](http://localhost:3000)上启动应用程序
|
||||
|
||||
```bash
|
||||
yarn start
|
||||
```
|
||||
|
||||
9. 开发时:
|
||||
|
||||
- 在`packages/ui`中创建`.env`文件并指定`PORT`(参考`.env.example`)
|
||||
- 在`packages/server`中创建`.env`文件并指定`PORT`(参考`.env.example`)
|
||||
- 运行
|
||||
|
||||
```bash
|
||||
yarn dev
|
||||
```
|
||||
|
||||
对`packages/ui`或`packages/server`进行的任何更改都将反映在[http://localhost:8080](http://localhost:8080)上
|
||||
|
||||
对于`packages/components`中进行的更改,再次运行`yarn build`以应用更改。
|
||||
|
||||
10. 做完所有的更改后,运行以下命令来确保在生产环境中一切正常:
|
||||
|
||||
```bash
|
||||
yarn build
|
||||
```
|
||||
|
||||
和
|
||||
|
||||
```bash
|
||||
yarn start
|
||||
```
|
||||
|
||||
11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/master) 的分叉分支上提交 Pull Request。
|
||||
|
||||
## 🌱 环境变量
|
||||
|
||||
Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。阅读[更多信息](https://docs.flowiseai.com/environment-variables)
|
||||
|
||||
| 变量名 | 描述 | 类型 | 默认值 |
|
||||
| --------------------------- | ------------------------------------------------------ | ----------------------------------------------- | ----------------------------------- |
|
||||
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
|
||||
| FLOWISE_USERNAME | 登录用户名 | 字符串 | |
|
||||
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
|
||||
| DEBUG | 打印组件的日志 | 布尔值 | |
|
||||
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
|
||||
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| APIKEY_PATH | 存储 API 密钥的位置 | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| TOOL_FUNCTION_BUILTIN_DEP | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
|
||||
| TOOL_FUNCTION_EXTERNAL_DEP | 用于工具函数的外部模块 | 字符串 | |
|
||||
| DATABASE_TYPE | 存储 flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| DATABASE_PATH | 数据库保存的位置(当 DATABASE_TYPE 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
|
||||
| DATABASE_HOST | 主机 URL 或 IP 地址(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_PORT | 数据库端口(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_USERNAME | 数据库用户名(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_PASSWORD | 数据库密码(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
|
||||
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |
|
||||
|
||||
您也可以在使用 `npx` 时指定环境变量。例如:
|
||||
|
||||
```
|
||||
npx flowise start --PORT=3000 --DEBUG=true
|
||||
```
|
||||
|
||||
## 📖 贡献文档
|
||||
|
||||
[Flowise 文档](https://github.com/FlowiseAI/FlowiseDocs)
|
||||
|
||||
## 🏷️ Pull Request 流程
|
||||
|
||||
当您打开一个 Pull Request 时,FlowiseAI 团队的成员将自动收到通知/指派。您也可以在 [Discord](https://discord.gg/jbaHfsRVBW) 上联系我们。
|
||||
|
||||
##
|
||||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
# Contributing to Flowise
|
||||
|
||||
English | [中文](./CONTRIBUTING-ZH.md)
|
||||
English | [中文](./i18n/CONTRIBUTING-ZH.md)
|
||||
|
||||
We appreciate any form of contributions.
|
||||
|
||||
|
|
@ -30,7 +30,7 @@ Found an issue? [Report it](https://github.com/FlowiseAI/Flowise/issues/new/choo
|
|||
|
||||
Not sure what to contribute? Some ideas:
|
||||
|
||||
- Create new components from Langchain
|
||||
- Create new components from `packages/components`
|
||||
- Update existing components such as extending functionality, fixing bugs
|
||||
- Add new chatflow ideas
|
||||
|
||||
|
|
@ -40,13 +40,13 @@ Flowise has 3 different modules in a single mono repository.
|
|||
|
||||
- `server`: Node backend to serve API logics
|
||||
- `ui`: React frontend
|
||||
- `components`: Langchain components
|
||||
- `components`: Third-party nodes integrations
|
||||
|
||||
#### Prerequisite
|
||||
|
||||
- Install [Yarn v1](https://classic.yarnpkg.com/en/docs/install)
|
||||
- Install [PNPM](https://pnpm.io/installation). The project is configured to use pnpm v9.
|
||||
```bash
|
||||
npm i -g yarn
|
||||
npm i -g pnpm
|
||||
```
|
||||
|
||||
#### Step by step
|
||||
|
|
@ -71,45 +71,45 @@ Flowise has 3 different modules in a single mono repository.
|
|||
6. Install all dependencies of all modules:
|
||||
|
||||
```bash
|
||||
yarn install
|
||||
pnpm install
|
||||
```
|
||||
|
||||
7. Build all the code:
|
||||
|
||||
```bash
|
||||
yarn build
|
||||
pnpm build
|
||||
```
|
||||
|
||||
8. Start the app on [http://localhost:3000](http://localhost:3000)
|
||||
|
||||
```bash
|
||||
yarn start
|
||||
pnpm start
|
||||
```
|
||||
|
||||
9. For development:
|
||||
|
||||
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/ui`
|
||||
- Create `.env` file and specify the `VITE_PORT` (refer to `.env.example`) in `packages/ui`
|
||||
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server`
|
||||
- Run
|
||||
|
||||
```bash
|
||||
yarn dev
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
Any changes made in `packages/ui` or `packages/server` will be reflected on [http://localhost:8080](http://localhost:8080)
|
||||
|
||||
For changes made in `packages/components`, run `yarn build` again to pickup the changes.
|
||||
For changes made in `packages/components`, run `pnpm build` again to pickup the changes.
|
||||
|
||||
10. After making all the changes, run
|
||||
|
||||
```bash
|
||||
yarn build
|
||||
pnpm build
|
||||
```
|
||||
|
||||
and
|
||||
|
||||
```bash
|
||||
yarn start
|
||||
pnpm start
|
||||
```
|
||||
|
||||
to make sure everything works fine in production.
|
||||
|
|
@ -120,28 +120,41 @@ Flowise has 3 different modules in a single mono repository.
|
|||
|
||||
Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)
|
||||
|
||||
| Variable | Description | Type | Default |
|
||||
| --------------------------- | ---------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
|
||||
| PORT | The HTTP port Flowise runs on | Number | 3000 |
|
||||
| FLOWISE_USERNAME | Username to login | String | |
|
||||
| FLOWISE_PASSWORD | Password to login | String | |
|
||||
| DEBUG | Print logs from components | Boolean | |
|
||||
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
|
||||
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| APIKEY_PATH | Location where api keys are saved | String | `your-path/Flowise/packages/server` |
|
||||
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
|
||||
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
|
||||
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
|
||||
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_SSL | Database connection over SSL (When DATABASE_TYPE is postgres) | Boolean | false |
|
||||
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
|
||||
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String |
|
||||
| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean |
|
||||
| Variable | Description | Type | Default |
|
||||
| ---------------------------- | ----------------------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
|
||||
| PORT | The HTTP port Flowise runs on | Number | 3000 |
|
||||
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
|
||||
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
|
||||
| FLOWISE_USERNAME | Username to login | String | |
|
||||
| FLOWISE_PASSWORD | Password to login | String | |
|
||||
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
|
||||
| DISABLE_CHATFLOW_REUSE | Forces the creation of a new ChatFlow for each call instead of reusing existing ones from cache | Boolean | |
|
||||
| DEBUG | Print logs from components | Boolean | |
|
||||
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
|
||||
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
|
||||
| APIKEY_PATH | Location where api keys are saved | String | `your-path/Flowise/packages/server` |
|
||||
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
|
||||
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
|
||||
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
|
||||
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
|
||||
| DATABASE_SSL | Database connection over SSL (When DATABASE_TYPE is postgres) | Boolean | false |
|
||||
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
|
||||
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String |
|
||||
| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean |
|
||||
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
|
||||
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local` | `local` |
|
||||
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
|
||||
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
|
||||
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
|
||||
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
|
||||
| S3_STORAGE_REGION | Region for S3 bucket | String | |
|
||||
|
||||
You can also specify the env variables when using `npx`. For example:
|
||||
|
||||
|
|
|
|||
27
Dockerfile
|
|
@ -4,7 +4,7 @@
|
|||
# Run image
|
||||
# docker run -d -p 3000:3000 flowise
|
||||
|
||||
FROM node:18-alpine
|
||||
FROM node:20-alpine
|
||||
RUN apk add --update libc6-compat python3 make g++
|
||||
# needed for pdfjs-dist
|
||||
RUN apk add --no-cache build-base cairo-dev pango-dev
|
||||
|
|
@ -12,30 +12,21 @@ RUN apk add --no-cache build-base cairo-dev pango-dev
|
|||
# Install Chromium
|
||||
RUN apk add --no-cache chromium
|
||||
|
||||
#install PNPM globaly
|
||||
RUN npm install -g pnpm
|
||||
|
||||
ENV PUPPETEER_SKIP_DOWNLOAD=true
|
||||
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
|
||||
|
||||
WORKDIR /usr/src/packages
|
||||
|
||||
# Copy root package.json and lockfile
|
||||
COPY package.json yarn.loc[k] ./
|
||||
|
||||
# Copy components package.json
|
||||
COPY packages/components/package.json ./packages/components/package.json
|
||||
|
||||
# Copy ui package.json
|
||||
COPY packages/ui/package.json ./packages/ui/package.json
|
||||
|
||||
# Copy server package.json
|
||||
COPY packages/server/package.json ./packages/server/package.json
|
||||
|
||||
RUN yarn install
|
||||
WORKDIR /usr/src
|
||||
|
||||
# Copy app source
|
||||
COPY . .
|
||||
|
||||
RUN yarn build
|
||||
RUN pnpm install
|
||||
|
||||
RUN pnpm build
|
||||
|
||||
EXPOSE 3000
|
||||
|
||||
CMD [ "yarn", "start" ]
|
||||
CMD [ "pnpm", "start" ]
|
||||
|
|
|
|||
22
README.md
|
|
@ -10,7 +10,7 @@
|
|||
[](https://star-history.com/#FlowiseAI/Flowise)
|
||||
[](https://github.com/FlowiseAI/Flowise/fork)
|
||||
|
||||
English | [中文](./README-ZH.md)
|
||||
English | [中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)
|
||||
|
||||
<h3>Drag & drop UI to build your customized LLM flow</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
|
|
@ -44,9 +44,9 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
|
|||
|
||||
1. Go to `docker` folder at the root of the project
|
||||
2. Copy `.env.example` file, paste it into the same location, and rename to `.env`
|
||||
3. `docker-compose up -d`
|
||||
3. `docker compose up -d`
|
||||
4. Open [http://localhost:3000](http://localhost:3000)
|
||||
5. You can bring the containers down by `docker-compose stop`
|
||||
5. You can bring the containers down by `docker compose stop`
|
||||
|
||||
### Docker Image
|
||||
|
||||
|
|
@ -71,13 +71,13 @@ Flowise has 3 different modules in a single mono repository.
|
|||
|
||||
- `server`: Node backend to serve API logics
|
||||
- `ui`: React frontend
|
||||
- `components`: Langchain components
|
||||
- `components`: Third-party nodes integrations
|
||||
|
||||
### Prerequisite
|
||||
|
||||
- Install [Yarn v1](https://classic.yarnpkg.com/en/docs/install)
|
||||
- Install [PNPM](https://pnpm.io/installation)
|
||||
```bash
|
||||
npm i -g yarn
|
||||
npm i -g pnpm
|
||||
```
|
||||
|
||||
### Setup
|
||||
|
|
@ -97,31 +97,31 @@ Flowise has 3 different modules in a single mono repository.
|
|||
3. Install all dependencies of all modules:
|
||||
|
||||
```bash
|
||||
yarn install
|
||||
pnpm install
|
||||
```
|
||||
|
||||
4. Build all the code:
|
||||
|
||||
```bash
|
||||
yarn build
|
||||
pnpm build
|
||||
```
|
||||
|
||||
5. Start the app:
|
||||
|
||||
```bash
|
||||
yarn start
|
||||
pnpm start
|
||||
```
|
||||
|
||||
You can now access the app on [http://localhost:3000](http://localhost:3000)
|
||||
|
||||
6. For development build:
|
||||
|
||||
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/ui`
|
||||
- Create `.env` file and specify the `VITE_PORT` (refer to `.env.example`) in `packages/ui`
|
||||
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server`
|
||||
- Run
|
||||
|
||||
```bash
|
||||
yarn dev
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
Any code changes will reload the app automatically on [http://localhost:8080](http://localhost:8080)
|
||||
|
|
|
|||
|
|
@ -1,13 +0,0 @@
|
|||
module.exports = {
|
||||
presets: [
|
||||
'@babel/preset-typescript',
|
||||
[
|
||||
'@babel/preset-env',
|
||||
{
|
||||
targets: {
|
||||
node: 'current'
|
||||
}
|
||||
}
|
||||
]
|
||||
]
|
||||
}
|
||||
|
|
@ -3,22 +3,30 @@ DATABASE_PATH=/root/.flowise
|
|||
APIKEY_PATH=/root/.flowise
|
||||
SECRETKEY_PATH=/root/.flowise
|
||||
LOG_PATH=/root/.flowise/logs
|
||||
BLOB_STORAGE_PATH=/root/.flowise/storage
|
||||
|
||||
# NUMBER_OF_PROXIES= 1
|
||||
# CORS_ORIGINS=*
|
||||
# IFRAME_ORIGINS=*
|
||||
|
||||
# DATABASE_TYPE=postgres
|
||||
# DATABASE_PORT=""
|
||||
# DATABASE_PORT=5432
|
||||
# DATABASE_HOST=""
|
||||
# DATABASE_NAME="flowise"
|
||||
# DATABASE_USER=""
|
||||
# DATABASE_PASSWORD=""
|
||||
# DATABASE_NAME=flowise
|
||||
# DATABASE_USER=root
|
||||
# DATABASE_PASSWORD=mypassword
|
||||
# DATABASE_SSL=true
|
||||
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>
|
||||
|
||||
# FLOWISE_USERNAME=user
|
||||
# FLOWISE_PASSWORD=1234
|
||||
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
|
||||
# FLOWISE_FILE_SIZE_LIMIT=50mb
|
||||
|
||||
# DISABLE_CHATFLOW_REUSE=true
|
||||
|
||||
# DEBUG=true
|
||||
# LOG_LEVEL=debug (error | warn | info | verbose | debug)
|
||||
# LOG_LEVEL=info (error | warn | info | verbose | debug)
|
||||
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
|
||||
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
|
||||
|
||||
|
|
@ -27,4 +35,15 @@ LOG_PATH=/root/.flowise/logs
|
|||
# LANGCHAIN_API_KEY=your_api_key
|
||||
# LANGCHAIN_PROJECT=your_project
|
||||
|
||||
# DISABLE_FLOWISE_TELEMETRY=true
|
||||
# DISABLE_FLOWISE_TELEMETRY=true
|
||||
|
||||
# Uncomment the following line to enable model list config, load the list of models from your local config file
|
||||
# see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format
|
||||
# MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path
|
||||
|
||||
# STORAGE_TYPE=local (local | s3)
|
||||
# BLOB_STORAGE_PATH=/your_storage_path/.flowise/storage
|
||||
# S3_STORAGE_BUCKET_NAME=flowise
|
||||
# S3_STORAGE_ACCESS_KEY_ID=<your-access-key>
|
||||
# S3_STORAGE_SECRET_ACCESS_KEY=<your-secret-key>
|
||||
# S3_STORAGE_REGION=us-west-2
|
||||
|
|
@ -1,21 +1,25 @@
|
|||
FROM node:18-alpine
|
||||
# Stage 1: Build stage
|
||||
FROM node:20-alpine as build
|
||||
|
||||
USER root
|
||||
|
||||
RUN apk add --no-cache git
|
||||
RUN apk add --no-cache python3 py3-pip make g++
|
||||
# needed for pdfjs-dist
|
||||
RUN apk add --no-cache build-base cairo-dev pango-dev
|
||||
|
||||
# Install Chromium
|
||||
RUN apk add --no-cache chromium
|
||||
|
||||
# Skip downloading Chrome for Puppeteer (saves build time)
|
||||
ENV PUPPETEER_SKIP_DOWNLOAD=true
|
||||
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
|
||||
|
||||
# You can install a specific version like: flowise@1.0.0
|
||||
# Install latest Flowise globally (specific version can be set: flowise@1.0.0)
|
||||
RUN npm install -g flowise
|
||||
|
||||
WORKDIR /data
|
||||
# Stage 2: Runtime stage
|
||||
FROM node:20-alpine
|
||||
|
||||
CMD "flowise"
|
||||
# Install runtime dependencies
|
||||
RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev
|
||||
|
||||
# Set the environment variable for Puppeteer to find Chromium
|
||||
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
|
||||
|
||||
# Copy Flowise from the build stage
|
||||
COPY --from=build /usr/local/lib/node_modules /usr/local/lib/node_modules
|
||||
COPY --from=build /usr/local/bin /usr/local/bin
|
||||
|
||||
ENTRYPOINT ["flowise", "start"]
|
||||
|
|
|
|||
|
|
@ -5,9 +5,9 @@ Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise
|
|||
## Usage
|
||||
|
||||
1. Create `.env` file and specify the `PORT` (refer to `.env.example`)
|
||||
2. `docker-compose up -d`
|
||||
2. `docker compose up -d`
|
||||
3. Open [http://localhost:3000](http://localhost:3000)
|
||||
4. You can bring the containers down by `docker-compose stop`
|
||||
4. You can bring the containers down by `docker compose stop`
|
||||
|
||||
## 🔒 Authentication
|
||||
|
||||
|
|
@ -19,9 +19,9 @@ Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise
|
|||
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
|
||||
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
|
||||
```
|
||||
3. `docker-compose up -d`
|
||||
3. `docker compose up -d`
|
||||
4. Open [http://localhost:3000](http://localhost:3000)
|
||||
5. You can bring the containers down by `docker-compose stop`
|
||||
5. You can bring the containers down by `docker compose stop`
|
||||
|
||||
## 🌱 Env Variables
|
||||
|
||||
|
|
@ -31,5 +31,6 @@ If you like to persist your data (flows, logs, apikeys, credentials), set these
|
|||
- APIKEY_PATH=/root/.flowise
|
||||
- LOG_PATH=/root/.flowise/logs
|
||||
- SECRETKEY_PATH=/root/.flowise
|
||||
- BLOB_STORAGE_PATH=/root/.flowise/storage
|
||||
|
||||
Flowise also support different environment variables to configure your instance. Read [more](https://docs.flowiseai.com/environment-variables)
|
||||
|
|
|
|||
|
|
@ -6,8 +6,11 @@ services:
|
|||
restart: always
|
||||
environment:
|
||||
- PORT=${PORT}
|
||||
- CORS_ORIGINS=${CORS_ORIGINS}
|
||||
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
|
||||
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
|
||||
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
|
||||
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
|
||||
- DEBUG=${DEBUG}
|
||||
- DATABASE_PATH=${DATABASE_PATH}
|
||||
- DATABASE_TYPE=${DATABASE_TYPE}
|
||||
|
|
@ -17,14 +20,17 @@ services:
|
|||
- DATABASE_USER=${DATABASE_USER}
|
||||
- DATABASE_PASSWORD=${DATABASE_PASSWORD}
|
||||
- DATABASE_SSL=${DATABASE_SSL}
|
||||
- DATABASE_SSL_KEY_BASE64=${DATABASE_SSL_KEY_BASE64}
|
||||
- APIKEY_PATH=${APIKEY_PATH}
|
||||
- SECRETKEY_PATH=${SECRETKEY_PATH}
|
||||
- FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
|
||||
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
|
||||
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
|
||||
ports:
|
||||
- '${PORT}:${PORT}'
|
||||
volumes:
|
||||
- ~/.flowise:/root/.flowise
|
||||
command: /bin/sh -c "sleep 3; flowise start"
|
||||
entrypoint: /bin/sh -c "sleep 3; flowise start"
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
# 贡献者公约行为准则
|
||||
|
||||
[English](<./CODE_OF_CONDUCT.md>) | 中文
|
||||
[English](../CODE_OF_CONDUCT.md) | 中文
|
||||
|
||||
## 我们的承诺
|
||||
|
||||
|
|
@ -44,6 +44,6 @@
|
|||
|
||||
## 归属
|
||||
|
||||
该行为准则的内容来自于[贡献者公约](http://contributor-covenant.org/)1.4版,可在[http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4)上获取。
|
||||
该行为准则的内容来自于[贡献者公约](http://contributor-covenant.org/)1.4 版,可在[http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4)上获取。
|
||||
|
||||
[主页]: http://contributor-covenant.org
|
||||
|
|
@ -0,0 +1,166 @@
|
|||
<!-- markdownlint-disable MD030 -->
|
||||
|
||||
# 贡献给 Flowise
|
||||
|
||||
[English](../CONTRIBUTING.md) | 中文
|
||||
|
||||
我们欢迎任何形式的贡献。
|
||||
|
||||
## ⭐ 点赞
|
||||
|
||||
点赞并分享[Github 仓库](https://github.com/FlowiseAI/Flowise)。
|
||||
|
||||
## 🙋 问题和回答
|
||||
|
||||
在[问题和回答](https://github.com/FlowiseAI/Flowise/discussions/categories/q-a)部分搜索任何问题,如果找不到,可以毫不犹豫地创建一个。这可能会帮助到其他有类似问题的人。
|
||||
|
||||
## 🙌 分享 Chatflow
|
||||
|
||||
是的!分享你如何使用 Flowise 是一种贡献方式。将你的 Chatflow 导出为 JSON,附上截图并在[展示和分享](https://github.com/FlowiseAI/Flowise/discussions/categories/show-and-tell)部分分享。
|
||||
|
||||
## 💡 想法
|
||||
|
||||
欢迎各种想法,如新功能、应用集成和区块链网络。在[想法](https://github.com/FlowiseAI/Flowise/discussions/categories/ideas)部分提交。
|
||||
|
||||
## 🐞 报告错误
|
||||
|
||||
发现问题了吗?[报告它](https://github.com/FlowiseAI/Flowise/issues/new/choose)。
|
||||
|
||||
## 👨💻 贡献代码
|
||||
|
||||
不确定要贡献什么?一些想法:
|
||||
|
||||
- 从 `packages/components` 创建新组件
|
||||
- 更新现有组件,如扩展功能、修复错误
|
||||
- 添加新的 Chatflow 想法
|
||||
|
||||
### 开发人员
|
||||
|
||||
Flowise 在一个单一的单体存储库中有 3 个不同的模块。
|
||||
|
||||
- `server`:用于提供 API 逻辑的 Node 后端
|
||||
- `ui`:React 前端
|
||||
- `components`:Langchain/LlamaIndex 组件
|
||||
|
||||
#### 先决条件
|
||||
|
||||
- 安装 [PNPM](https://pnpm.io/installation)
|
||||
```bash
|
||||
npm i -g pnpm
|
||||
```
|
||||
|
||||
#### 逐步指南
|
||||
|
||||
1. Fork 官方的[Flowise Github 仓库](https://github.com/FlowiseAI/Flowise)。
|
||||
|
||||
2. 克隆你 fork 的存储库。
|
||||
|
||||
3. 创建一个新的分支,参考[指南](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository)。命名约定:
|
||||
|
||||
- 对于功能分支:`feature/<你的新功能>`
|
||||
- 对于 bug 修复分支:`bugfix/<你的新bug修复>`。
|
||||
|
||||
4. 切换到新创建的分支。
|
||||
|
||||
5. 进入存储库文件夹
|
||||
|
||||
```bash
|
||||
cd Flowise
|
||||
```
|
||||
|
||||
6. 安装所有模块的依赖项:
|
||||
|
||||
```bash
|
||||
pnpm install
|
||||
```
|
||||
|
||||
7. 构建所有代码:
|
||||
|
||||
```bash
|
||||
pnpm build
|
||||
```
|
||||
|
||||
8. 在[http://localhost:3000](http://localhost:3000)上启动应用程序
|
||||
|
||||
```bash
|
||||
pnpm start
|
||||
```
|
||||
|
||||
9. 开发时:
|
||||
|
||||
- 在`packages/ui`中创建`.env`文件并指定`VITE_PORT`(参考`.env.example`)
|
||||
- 在`packages/server`中创建`.env`文件并指定`PORT`(参考`.env.example`)
|
||||
- 运行
|
||||
|
||||
```bash
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
对`packages/ui`或`packages/server`进行的任何更改都将反映在[http://localhost:8080](http://localhost:8080)上
|
||||
|
||||
对于`packages/components`中进行的更改,再次运行`pnpm build`以应用更改。
|
||||
|
||||
10. 做完所有的更改后,运行以下命令来确保在生产环境中一切正常:
|
||||
|
||||
```bash
|
||||
pnpm build
|
||||
```
|
||||
|
||||
和
|
||||
|
||||
```bash
|
||||
pnpm start
|
||||
```
|
||||
|
||||
11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/master) 的分叉分支上提交 Pull Request。
|
||||
|
||||
## 🌱 环境变量
|
||||
|
||||
Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。阅读[更多信息](https://docs.flowiseai.com/environment-variables)
|
||||
|
||||
| 变量名 | 描述 | 类型 | 默认值 |
|
||||
| ---------------------------- | -------------------------------------------------------------------- | ----------------------------------------------- | ----------------------------------- |
|
||||
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
|
||||
| FLOWISE_USERNAME | 登录用户名 | 字符串 | |
|
||||
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
|
||||
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb |
|
||||
| DISABLE_CHATFLOW_REUSE | 强制为每次调用创建一个新的 ChatFlow,而不是重用缓存中的现有 ChatFlow | 布尔值 | |
|
||||
| DEBUG | 打印组件的日志 | 布尔值 | |
|
||||
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
|
||||
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| APIKEY_PATH | 存储 API 密钥的位置 | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| TOOL_FUNCTION_BUILTIN_DEP | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
|
||||
| TOOL_FUNCTION_EXTERNAL_DEP | 用于工具函数的外部模块 | 字符串 | |
|
||||
| DATABASE_TYPE | 存储 flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| DATABASE_PATH | 数据库保存的位置(当 DATABASE_TYPE 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
|
||||
| DATABASE_HOST | 主机 URL 或 IP 地址(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_PORT | 数据库端口(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_USERNAME | 数据库用户名(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_PASSWORD | 数据库密码(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
|
||||
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |
|
||||
| MODEL_LIST_CONFIG_JSON | 加载模型的位置 | 字符串 | `/your_model_list_config_file_path` |
|
||||
| STORAGE_TYPE | 上传文件的存储类型 | 枚举字符串: `local`, `s3` | `local` |
|
||||
| BLOB_STORAGE_PATH | 上传文件存储的本地文件夹路径, 当`STORAGE_TYPE`是`local` | 字符串 | `your-home-dir/.flowise/storage` |
|
||||
| S3_STORAGE_BUCKET_NAME | S3 存储文件夹路径, 当`STORAGE_TYPE`是`s3` | 字符串 | |
|
||||
| S3_STORAGE_ACCESS_KEY_ID | AWS 访问密钥 (Access Key) | 字符串 | |
|
||||
| S3_STORAGE_SECRET_ACCESS_KEY | AWS 密钥 (Secret Key) | 字符串 | |
|
||||
| S3_STORAGE_REGION | S3 存储地区 | 字符串 | |
|
||||
|
||||
您也可以在使用 `npx` 时指定环境变量。例如:
|
||||
|
||||
```
|
||||
npx flowise start --PORT=3000 --DEBUG=true
|
||||
```
|
||||
|
||||
## 📖 贡献文档
|
||||
|
||||
[Flowise 文档](https://github.com/FlowiseAI/FlowiseDocs)
|
||||
|
||||
## 🏷️ Pull Request 流程
|
||||
|
||||
当您打开一个 Pull Request 时,FlowiseAI 团队的成员将自动收到通知/指派。您也可以在 [Discord](https://discord.gg/jbaHfsRVBW) 上联系我们。
|
||||
|
||||
##
|
||||
|
|
@ -0,0 +1,204 @@
|
|||
<!-- markdownlint-disable MD030 -->
|
||||
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
|
||||
|
||||
# Flowise - LLM アプリを簡単に構築
|
||||
|
||||
[](https://github.com/FlowiseAI/Flowise/releases)
|
||||
[](https://discord.gg/jbaHfsRVBW)
|
||||
[](https://twitter.com/FlowiseAI)
|
||||
[](https://star-history.com/#FlowiseAI/Flowise)
|
||||
[](https://github.com/FlowiseAI/Flowise/fork)
|
||||
|
||||
[English](../README.md) | [中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md)
|
||||
|
||||
<h3>ドラッグ&ドロップでカスタマイズした LLM フローを構築できる UI</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
|
||||
|
||||
## ⚡ クイックスタート
|
||||
|
||||
[NodeJS](https://nodejs.org/en/download) >= 18.15.0 をダウンロードしてインストール
|
||||
|
||||
1. Flowise のインストール
|
||||
```bash
|
||||
npm install -g flowise
|
||||
```
|
||||
2. Flowise の実行
|
||||
|
||||
```bash
|
||||
npx flowise start
|
||||
```
|
||||
|
||||
ユーザー名とパスワードを入力
|
||||
|
||||
```bash
|
||||
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
3. [http://localhost:3000](http://localhost:3000) を開く
|
||||
|
||||
## 🐳 Docker
|
||||
|
||||
### Docker Compose
|
||||
|
||||
1. プロジェクトのルートにある `docker` フォルダに移動する
|
||||
2. `.env.example` ファイルをコピーして同じ場所に貼り付け、名前を `.env` に変更する
|
||||
3. `docker compose up -d`
|
||||
4. [http://localhost:3000](http://localhost:3000) を開く
|
||||
5. コンテナを停止するには、`docker compose stop` を使用します
|
||||
|
||||
### Docker Image
|
||||
|
||||
1. ローカルにイメージを構築する:
|
||||
```bash
|
||||
docker build --no-cache -t flowise .
|
||||
```
|
||||
2. image を実行:
|
||||
|
||||
```bash
|
||||
docker run -d --name flowise -p 3000:3000 flowise
|
||||
```
|
||||
|
||||
3. image を停止:
|
||||
```bash
|
||||
docker stop flowise
|
||||
```
|
||||
|
||||
## 👨💻 開発者向け
|
||||
|
||||
Flowise には、3 つの異なるモジュールが 1 つの mono リポジトリにあります。
|
||||
|
||||
- `server`: API ロジックを提供する Node バックエンド
|
||||
- `ui`: React フロントエンド
|
||||
- `components`: サードパーティノードとの統合
|
||||
|
||||
### 必須条件
|
||||
|
||||
- [PNPM](https://pnpm.io/installation) をインストール
|
||||
```bash
|
||||
npm i -g pnpm
|
||||
```
|
||||
|
||||
### セットアップ
|
||||
|
||||
1. リポジトリをクローン
|
||||
|
||||
```bash
|
||||
git clone https://github.com/FlowiseAI/Flowise.git
|
||||
```
|
||||
|
||||
2. リポジトリフォルダに移動
|
||||
|
||||
```bash
|
||||
cd Flowise
|
||||
```
|
||||
|
||||
3. すべてのモジュールの依存関係をインストール:
|
||||
|
||||
```bash
|
||||
pnpm install
|
||||
```
|
||||
|
||||
4. すべてのコードをビルド:
|
||||
|
||||
```bash
|
||||
pnpm build
|
||||
```
|
||||
|
||||
5. アプリを起動:
|
||||
|
||||
```bash
|
||||
pnpm start
|
||||
```
|
||||
|
||||
[http://localhost:3000](http://localhost:3000) でアプリにアクセスできるようになりました
|
||||
|
||||
6. 開発用ビルド:
|
||||
|
||||
- `.env` ファイルを作成し、`packages/ui` に `VITE_PORT` を指定する(`.env.example` を参照)
|
||||
- `.env` ファイルを作成し、`packages/server` に `PORT` を指定する(`.env.example` を参照)
|
||||
- 実行
|
||||
|
||||
```bash
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
コードの変更は [http://localhost:8080](http://localhost:8080) に自動的にアプリをリロードします
|
||||
|
||||
## 🔒 認証
|
||||
|
||||
アプリレベルの認証を有効にするには、 `FLOWISE_USERNAME` と `FLOWISE_PASSWORD` を `packages/server` の `.env` ファイルに追加します:
|
||||
|
||||
```
|
||||
FLOWISE_USERNAME=user
|
||||
FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
## 🌱 環境変数
|
||||
|
||||
Flowise は、インスタンスを設定するためのさまざまな環境変数をサポートしています。`packages/server` フォルダ内の `.env` ファイルで以下の変数を指定することができる。[続き](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)を読む
|
||||
|
||||
## 📖 ドキュメント
|
||||
|
||||
[Flowise ドキュメント](https://docs.flowiseai.com/)
|
||||
|
||||
## 🌐 セルフホスト
|
||||
|
||||
お客様の既存インフラに Flowise をセルフホストでデプロイ、様々な[デプロイ](https://docs.flowiseai.com/configuration/deployment)をサポートします
|
||||
|
||||
- [AWS](https://docs.flowiseai.com/deployment/aws)
|
||||
- [Azure](https://docs.flowiseai.com/deployment/azure)
|
||||
- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
|
||||
- [GCP](https://docs.flowiseai.com/deployment/gcp)
|
||||
- <details>
|
||||
<summary>その他</summary>
|
||||
|
||||
- [Railway](https://docs.flowiseai.com/deployment/railway)
|
||||
|
||||
[](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
|
||||
|
||||
- [Render](https://docs.flowiseai.com/deployment/render)
|
||||
|
||||
[](https://docs.flowiseai.com/deployment/render)
|
||||
|
||||
- [Hugging Face Spaces](https://docs.flowiseai.com/deployment/hugging-face)
|
||||
|
||||
<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="Hugging Face Spaces"></a>
|
||||
|
||||
- [Elestio](https://elest.io/open-source/flowiseai)
|
||||
|
||||
[](https://elest.io/open-source/flowiseai)
|
||||
|
||||
- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
|
||||
|
||||
[](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
|
||||
|
||||
- [RepoCloud](https://repocloud.io/details/?app_id=29)
|
||||
|
||||
[](https://repocloud.io/details/?app_id=29)
|
||||
|
||||
</details>
|
||||
|
||||
## 💻 クラウドホスト
|
||||
|
||||
近日公開
|
||||
|
||||
## 🙋 サポート
|
||||
|
||||
ご質問、問題提起、新機能のご要望は、[discussion](https://github.com/FlowiseAI/Flowise/discussions)までお気軽にどうぞ
|
||||
|
||||
## 🙌 コントリビュート
|
||||
|
||||
これらの素晴らしい貢献者に感謝します
|
||||
|
||||
<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
|
||||
</a>
|
||||
|
||||
[コントリビューティングガイド](CONTRIBUTING.md)を参照してください。質問や問題があれば、[Discord](https://discord.gg/jbaHfsRVBW) までご連絡ください。
|
||||
[](https://star-history.com/#FlowiseAI/Flowise&Date)
|
||||
|
||||
## 📄 ライセンス
|
||||
|
||||
このリポジトリのソースコードは、[Apache License Version 2.0](LICENSE.md)の下で利用可能です。
|
||||
|
|
@ -0,0 +1,204 @@
|
|||
<!-- markdownlint-disable MD030 -->
|
||||
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
|
||||
|
||||
# Flowise - 간편한 LLM 애플리케이션 제작
|
||||
|
||||
[](https://github.com/FlowiseAI/Flowise/releases)
|
||||
[](https://discord.gg/jbaHfsRVBW)
|
||||
[](https://twitter.com/FlowiseAI)
|
||||
[](https://star-history.com/#FlowiseAI/Flowise)
|
||||
[](https://github.com/FlowiseAI/Flowise/fork)
|
||||
|
||||
[English](../README.md) | [中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어
|
||||
|
||||
<h3>드래그 앤 드롭 UI로 맞춤형 LLM 플로우 구축하기</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
|
||||
|
||||
## ⚡빠른 시작 가이드
|
||||
|
||||
18.15.0 버전 이상의 [NodeJS](https://nodejs.org/en/download) 다운로드 및 설치
|
||||
|
||||
1. Flowise 설치
|
||||
```bash
|
||||
npm install -g flowise
|
||||
```
|
||||
2. Flowise 시작하기
|
||||
|
||||
```bash
|
||||
npx flowise start
|
||||
```
|
||||
|
||||
사용자 이름과 비밀번호로 시작하기
|
||||
|
||||
```bash
|
||||
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
3. [http://localhost:3000](http://localhost:3000) URL 열기
|
||||
|
||||
## 🐳 도커(Docker)를 활용하여 시작하기
|
||||
|
||||
### 도커 컴포즈 활용
|
||||
|
||||
1. 프로젝트의 최상위(root) 디렉토리에 있는 `docker` 폴더로 이동하세요.
|
||||
2. `.env.example` 파일을 복사한 후, 같은 경로에 붙여넣기 한 다음, `.env`로 이름을 변경합니다.
|
||||
3. `docker compose up -d` 실행
|
||||
4. [http://localhost:3000](http://localhost:3000) URL 열기
|
||||
5. `docker compose stop` 명령어를 통해 컨테이너를 종료시킬 수 있습니다.
|
||||
|
||||
### 도커 이미지 활용
|
||||
|
||||
1. 로컬에서 이미지 빌드하기:
|
||||
```bash
|
||||
docker build --no-cache -t flowise .
|
||||
```
|
||||
2. 이미지 실행하기:
|
||||
|
||||
```bash
|
||||
docker run -d --name flowise -p 3000:3000 flowise
|
||||
```
|
||||
|
||||
3. 이미지 종료하기:
|
||||
```bash
|
||||
docker stop flowise
|
||||
```
|
||||
|
||||
## 👨💻 개발자들을 위한 가이드
|
||||
|
||||
Flowise는 단일 리포지토리에 3개의 서로 다른 모듈이 있습니다.
|
||||
|
||||
- `server`: API 로직을 제공하는 노드 백엔드
|
||||
- `ui`: 리액트 프론트엔드
|
||||
- `components`: 서드파티 노드 통합을 위한 컴포넌트
|
||||
|
||||
### 사전 설치 요건
|
||||
|
||||
- [PNPM](https://pnpm.io/installation) 설치하기
|
||||
```bash
|
||||
npm i -g pnpm
|
||||
```
|
||||
|
||||
### 설치 및 설정
|
||||
|
||||
1. 리포지토리 복제
|
||||
|
||||
```bash
|
||||
git clone https://github.com/FlowiseAI/Flowise.git
|
||||
```
|
||||
|
||||
2. 리포지토리 폴더로 이동
|
||||
|
||||
```bash
|
||||
cd Flowise
|
||||
```
|
||||
|
||||
3. 모든 모듈의 종속성 설치:
|
||||
|
||||
```bash
|
||||
pnpm install
|
||||
```
|
||||
|
||||
4. 모든 코드 빌드하기:
|
||||
|
||||
```bash
|
||||
pnpm build
|
||||
```
|
||||
|
||||
5. 애플리케이션 시작:
|
||||
|
||||
```bash
|
||||
pnpm start
|
||||
```
|
||||
|
||||
이제 [http://localhost:3000](http://localhost:3000)에서 애플리케이션에 접속할 수 있습니다.
|
||||
|
||||
6. 개발 환경에서 빌드할 경우:
|
||||
|
||||
- `packages/ui`경로에 `.env` 파일을 생성하고 `VITE_PORT`(`.env.example` 참조)를 지정합니다.
|
||||
- `packages/server`경로에 `.env` 파일을 생성하고 `PORT`(`.env.example` 참조)를 지정합니다.
|
||||
- 실행하기
|
||||
|
||||
```bash
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
코드가 변경되면 [http://localhost:8080](http://localhost:8080)에서 자동으로 애플리케이션을 새로고침 합니다.
|
||||
|
||||
## 🔒 인증
|
||||
|
||||
애플리케이션 수준의 인증을 사용하려면 `packages/server`의 `.env` 파일에 `FLOWISE_USERNAME` 및 `FLOWISE_PASSWORD`를 추가합니다:
|
||||
|
||||
```
|
||||
FLOWISE_USERNAME=user
|
||||
FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
## 🌱 환경 변수
|
||||
|
||||
Flowise는 인스턴스 구성을 위한 다양한 환경 변수를 지원합니다. `packages/server` 폴더 내 `.env` 파일에 다양한 환경 변수를 지정할 수 있습니다. [자세히 보기](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
|
||||
## 📖 공식 문서
|
||||
|
||||
[Flowise 문서](https://docs.flowiseai.com/)
|
||||
|
||||
## 🌐 자체 호스팅 하기
|
||||
|
||||
기존 인프라 환경에서 Flowise를 자체 호스팅으로 배포하세요. 다양한 배포 [deployments](https://docs.flowiseai.com/configuration/deployment) 방법을 지원합니다.
|
||||
|
||||
- [AWS](https://docs.flowiseai.com/deployment/aws)
|
||||
- [Azure](https://docs.flowiseai.com/deployment/azure)
|
||||
- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
|
||||
- [GCP](https://docs.flowiseai.com/deployment/gcp)
|
||||
- <details>
|
||||
<summary>그 외</summary>
|
||||
|
||||
- [Railway](https://docs.flowiseai.com/deployment/railway)
|
||||
|
||||
[](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
|
||||
|
||||
- [Render](https://docs.flowiseai.com/deployment/render)
|
||||
|
||||
[](https://docs.flowiseai.com/deployment/render)
|
||||
|
||||
- [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
|
||||
|
||||
<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
|
||||
|
||||
- [Elestio](https://elest.io/open-source/flowiseai)
|
||||
|
||||
[](https://elest.io/open-source/flowiseai)
|
||||
|
||||
- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
|
||||
|
||||
[](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
|
||||
|
||||
- [RepoCloud](https://repocloud.io/details/?app_id=29)
|
||||
|
||||
[](https://repocloud.io/details/?app_id=29)
|
||||
|
||||
</details>
|
||||
|
||||
## 💻 클라우드 호스팅 서비스
|
||||
|
||||
곧 출시될 예정입니다.
|
||||
|
||||
## 🙋 기술 지원
|
||||
|
||||
질문, 버그 리포팅, 새로운 기능 요청 등은 [discussion](https://github.com/FlowiseAI/Flowise/discussions) 섹션에서 자유롭게 이야기 해주세요.
|
||||
|
||||
## 🙌 오픈소스 활동에 기여하기
|
||||
|
||||
다음과 같은 멋진 기여자들(contributors)에게 감사드립니다.
|
||||
|
||||
<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
|
||||
</a>
|
||||
|
||||
[contributing guide](CONTRIBUTING.md)를 살펴보세요. 디스코드 [Discord](https://discord.gg/jbaHfsRVBW) 채널에서도 이슈나 질의응답을 진행하실 수 있습니다.
|
||||
[](https://star-history.com/#FlowiseAI/Flowise&Date)
|
||||
|
||||
## 📄 라이센스
|
||||
|
||||
본 리포지토리의 소스코드는 [Apache License Version 2.0](LICENSE.md) 라이센스가 적용됩니다.
|
||||
|
|
@ -10,7 +10,7 @@
|
|||
[](https://star-history.com/#FlowiseAI/Flowise)
|
||||
[](https://github.com/FlowiseAI/Flowise/fork)
|
||||
|
||||
[English](./README.md) | 中文
|
||||
[English](../README.md) | 中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md)
|
||||
|
||||
<h3>拖放界面构建定制化的LLM流程</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
|
|
@ -44,9 +44,9 @@
|
|||
|
||||
1. 进入项目根目录下的 `docker` 文件夹
|
||||
2. 创建 `.env` 文件并指定 `PORT`(参考 `.env.example`)
|
||||
3. 运行 `docker-compose up -d`
|
||||
3. 运行 `docker compose up -d`
|
||||
4. 打开 [http://localhost:3000](http://localhost:3000)
|
||||
5. 可以通过 `docker-compose stop` 停止容器
|
||||
5. 可以通过 `docker compose stop` 停止容器
|
||||
|
||||
### Docker 镜像
|
||||
|
||||
|
|
@ -71,13 +71,13 @@ Flowise 在一个单一的代码库中有 3 个不同的模块。
|
|||
|
||||
- `server`:用于提供 API 逻辑的 Node 后端
|
||||
- `ui`:React 前端
|
||||
- `components`:Langchain 组件
|
||||
- `components`:第三方节点集成
|
||||
|
||||
### 先决条件
|
||||
|
||||
- 安装 [Yarn v1](https://classic.yarnpkg.com/en/docs/install)
|
||||
- 安装 [PNPM](https://pnpm.io/installation)
|
||||
```bash
|
||||
npm i -g yarn
|
||||
npm i -g pnpm
|
||||
```
|
||||
|
||||
### 设置
|
||||
|
|
@ -97,31 +97,31 @@ Flowise 在一个单一的代码库中有 3 个不同的模块。
|
|||
3. 安装所有模块的依赖:
|
||||
|
||||
```bash
|
||||
yarn install
|
||||
pnpm install
|
||||
```
|
||||
|
||||
4. 构建所有代码:
|
||||
|
||||
```bash
|
||||
yarn build
|
||||
pnpm build
|
||||
```
|
||||
|
||||
5. 启动应用:
|
||||
|
||||
```bash
|
||||
yarn start
|
||||
pnpm start
|
||||
```
|
||||
|
||||
现在可以在 [http://localhost:3000](http://localhost:3000) 访问应用
|
||||
|
||||
6. 用于开发构建:
|
||||
|
||||
- 在 `packages/ui` 中创建 `.env` 文件并指定 `PORT`(参考 `.env.example`)
|
||||
- 在 `packages/ui` 中创建 `.env` 文件并指定 `VITE_PORT`(参考 `.env.example`)
|
||||
- 在 `packages/server` 中创建 `.env` 文件并指定 `PORT`(参考 `.env.example`)
|
||||
- 运行
|
||||
|
||||
```bash
|
||||
yarn dev
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
任何代码更改都会自动重新加载应用程序,访问 [http://localhost:8080](http://localhost:8080)
|
||||
57
package.json
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "flowise",
|
||||
"version": "1.4.10",
|
||||
"version": "1.8.4",
|
||||
"private": true,
|
||||
"homepage": "https://flowiseai.com",
|
||||
"workspaces": [
|
||||
|
|
@ -11,19 +11,19 @@
|
|||
],
|
||||
"scripts": {
|
||||
"build": "turbo run build",
|
||||
"build-force": "turbo run build --force",
|
||||
"build-force": "pnpm clean && turbo run build --force",
|
||||
"dev": "turbo run dev --parallel",
|
||||
"start": "run-script-os",
|
||||
"start:windows": "cd packages/server/bin && run start",
|
||||
"start:default": "cd packages/server/bin && ./run start",
|
||||
"clean": "npm exec -ws -- rimraf dist build",
|
||||
"clean": "pnpm --filter \"./packages/**\" clean",
|
||||
"nuke": "pnpm --filter \"./packages/**\" nuke && rimraf node_modules .turbo",
|
||||
"format": "prettier --write \"**/*.{ts,tsx,md}\"",
|
||||
"test": "turbo run test",
|
||||
"lint": "eslint \"**/*.{js,jsx,ts,tsx,json,md}\"",
|
||||
"lint-fix": "yarn lint --fix",
|
||||
"lint-fix": "pnpm lint --fix",
|
||||
"quick": "pretty-quick --staged",
|
||||
"postinstall": "husky install",
|
||||
"migration:create": "yarn typeorm migration:create"
|
||||
"migration:create": "pnpm typeorm migration:create"
|
||||
},
|
||||
"lint-staged": {
|
||||
"*.{js,jsx,ts,tsx,json,md}": "eslint --fix"
|
||||
|
|
@ -43,15 +43,56 @@
|
|||
"eslint-plugin-react-hooks": "^4.6.0",
|
||||
"eslint-plugin-unused-imports": "^2.0.0",
|
||||
"husky": "^8.0.1",
|
||||
"kill-port": "^2.0.1",
|
||||
"lint-staged": "^13.0.3",
|
||||
"prettier": "^2.7.1",
|
||||
"pretty-quick": "^3.1.3",
|
||||
"rimraf": "^3.0.2",
|
||||
"run-script-os": "^1.1.6",
|
||||
"turbo": "^1.7.4",
|
||||
"turbo": "1.10.16",
|
||||
"typescript": "^4.8.4"
|
||||
},
|
||||
"pnpm": {
|
||||
"onlyBuiltDependencies": [
|
||||
"faiss-node",
|
||||
"sqlite3"
|
||||
]
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.15.0"
|
||||
"node": ">=18.15.0 <19.0.0 || ^20",
|
||||
"pnpm": ">=9"
|
||||
},
|
||||
"resolutions": {
|
||||
"@qdrant/openapi-typescript-fetch": "1.2.1",
|
||||
"@google/generative-ai": "^0.7.0",
|
||||
"openai": "4.51.0"
|
||||
},
|
||||
"eslintIgnore": [
|
||||
"**/dist",
|
||||
"**/node_modules",
|
||||
"**/build",
|
||||
"**/package-lock.json"
|
||||
],
|
||||
"prettier": {
|
||||
"printWidth": 140,
|
||||
"singleQuote": true,
|
||||
"jsxSingleQuote": true,
|
||||
"trailingComma": "none",
|
||||
"tabWidth": 4,
|
||||
"semi": false,
|
||||
"endOfLine": "auto"
|
||||
},
|
||||
"babel": {
|
||||
"presets": [
|
||||
"@babel/preset-typescript",
|
||||
[
|
||||
"@babel/preset-env",
|
||||
{
|
||||
"targets": {
|
||||
"node": "current"
|
||||
}
|
||||
}
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class AssemblyAIApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'AssemblyAI API'
|
||||
this.name = 'assemblyAIApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'AssemblyAI Api Key',
|
||||
name: 'assemblyAIApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: AssemblyAIApi }
|
||||
|
|
@ -10,13 +10,8 @@ class AstraDBApi implements INodeCredential {
|
|||
constructor() {
|
||||
this.label = 'Astra DB API'
|
||||
this.name = 'AstraDBApi'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Astra DB Collection Name',
|
||||
name: 'collectionName',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Astra DB Application Token',
|
||||
name: 'applicationToken',
|
||||
|
|
|
|||
|
|
@ -0,0 +1,28 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class BaiduApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Baidu API'
|
||||
this.name = 'baiduApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Baidu Api Key',
|
||||
name: 'baiduApiKey',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Baidu Secret Key',
|
||||
name: 'baiduSecretKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: BaiduApi }
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class ChatflowApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Chatflow API'
|
||||
this.name = 'chatflowApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Chatflow Api Key',
|
||||
name: 'chatflowApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: ChatflowApi }
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class ConfluenceApi implements INodeCredential {
|
||||
class ConfluenceCloudApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
|
|
@ -8,8 +8,8 @@ class ConfluenceApi implements INodeCredential {
|
|||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Confluence API'
|
||||
this.name = 'confluenceApi'
|
||||
this.label = 'Confluence Cloud API'
|
||||
this.name = 'confluenceCloudApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://support.atlassian.com/confluence-cloud/docs/manage-oauth-access-tokens/">official guide</a> on how to get Access Token or <a target="_blank" href="https://id.atlassian.com/manage-profile/security/api-tokens">API Token</a> on Confluence'
|
||||
|
|
@ -30,4 +30,4 @@ class ConfluenceApi implements INodeCredential {
|
|||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: ConfluenceApi }
|
||||
module.exports = { credClass: ConfluenceCloudApi }
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class ConfluenceServerDCApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Confluence Server/Data Center API'
|
||||
this.name = 'confluenceServerDCApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html/">official guide</a> on how to get Personal Access Token</a> on Confluence'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Personal Access Token',
|
||||
name: 'personalAccessToken',
|
||||
type: 'password',
|
||||
placeholder: '<CONFLUENCE_PERSONAL_ACCESS_TOKEN>'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: ConfluenceServerDCApi }
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Temporary disabled due to the incompatibility with the docker node-alpine:
|
||||
* https://github.com/FlowiseAI/Flowise/pull/2303
|
||||
|
||||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class CouchbaseApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Couchbase API'
|
||||
this.name = 'couchbaseApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Couchbase Connection String',
|
||||
name: 'connectionString',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Couchbase Username',
|
||||
name: 'username',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Couchbase Password',
|
||||
name: 'password',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: CouchbaseApi }
|
||||
*/
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* TODO: Implement codeInterpreter column to chat_message table
|
||||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class E2BApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'E2B API'
|
||||
this.name = 'E2BApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'E2B Api Key',
|
||||
name: 'e2bApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: E2BApi }
|
||||
*/
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class ExaSearchApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Exa Search API'
|
||||
this.name = 'exaSearchApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://docs.exa.ai/reference/getting-started#getting-access">official guide</a> on how to get an API Key from Exa'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'ExaSearch Api Key',
|
||||
name: 'exaSearchApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: ExaSearchApi }
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class FireCrawlApiCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'FireCrawl API'
|
||||
this.name = 'fireCrawlApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'You can find the FireCrawl API token on your <a target="_blank" href="https://www.firecrawl.dev/">FireCrawl account</a> page.'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'FireCrawl API',
|
||||
name: 'firecrawlApiToken',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: FireCrawlApiCredential }
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class FireworksApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Fireworks API'
|
||||
this.name = 'fireworksApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Fireworks Api Key',
|
||||
name: 'fireworksApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: FireworksApi }
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class GroqApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Groq API'
|
||||
this.name = 'groqApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Groq Api Key',
|
||||
name: 'groqApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: GroqApi }
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class LangWatchApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'LangWatch API'
|
||||
this.name = 'langwatchApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://docs.langwatch.ai/integration/python/guide">integration guide</a> on how to get API keys on LangWatch'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'API Key',
|
||||
name: 'langWatchApiKey',
|
||||
type: 'password',
|
||||
placeholder: '<LANGWATCH_API_KEY>'
|
||||
},
|
||||
{
|
||||
label: 'Endpoint',
|
||||
name: 'langWatchEndpoint',
|
||||
type: 'string',
|
||||
default: 'https://app.langwatch.ai'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: LangWatchApi }
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class LLMonitorApi implements INodeCredential {
|
||||
class LunaryApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
|
|
@ -8,25 +8,25 @@ class LLMonitorApi implements INodeCredential {
|
|||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'LLMonitor API'
|
||||
this.name = 'llmonitorApi'
|
||||
this.label = 'Lunary API'
|
||||
this.name = 'lunaryApi'
|
||||
this.version = 1.0
|
||||
this.description = 'Refer to <a target="_blank" href="https://llmonitor.com/docs">official guide</a> to get APP ID'
|
||||
this.description = 'Refer to <a target="_blank" href="https://lunary.ai/docs">official guide</a> to get APP ID'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'APP ID',
|
||||
name: 'llmonitorAppId',
|
||||
name: 'lunaryAppId',
|
||||
type: 'password',
|
||||
placeholder: '<LLMonitor_APP_ID>'
|
||||
placeholder: '<Lunary_APP_ID>'
|
||||
},
|
||||
{
|
||||
label: 'Endpoint',
|
||||
name: 'llmonitorEndpoint',
|
||||
name: 'lunaryEndpoint',
|
||||
type: 'string',
|
||||
default: 'https://app.llmonitor.com'
|
||||
default: 'https://app.lunary.ai'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: LLMonitorApi }
|
||||
module.exports = { credClass: LunaryApi }
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class MotorheadMemoryApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Motorhead Memory API'
|
||||
this.name = 'motorheadMemoryApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://docs.getmetal.io/misc-get-keys">official guide</a> on how to create API key and Client ID on Motorhead Memory'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Client ID',
|
||||
name: 'clientId',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'API Key',
|
||||
name: 'apiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: MotorheadMemoryApi }
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class MySQLApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'MySQL API'
|
||||
this.name = 'MySQLApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'User',
|
||||
name: 'user',
|
||||
type: 'string',
|
||||
placeholder: '<MYSQL_USERNAME>'
|
||||
},
|
||||
{
|
||||
label: 'Password',
|
||||
name: 'password',
|
||||
type: 'password',
|
||||
placeholder: '<MYSQL_PASSWORD>'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: MySQLApi }
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class OpenSearchUrl implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'OpenSearch'
|
||||
this.name = 'openSearchUrl'
|
||||
this.version = 2.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'OpenSearch Url',
|
||||
name: 'openSearchUrl',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'User',
|
||||
name: 'user',
|
||||
type: 'string',
|
||||
placeholder: '<OPENSEARCH_USERNAME>',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Password',
|
||||
name: 'password',
|
||||
type: 'password',
|
||||
placeholder: '<OPENSEARCH_PASSWORD>',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: OpenSearchUrl }
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class SpiderApiCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Spider API'
|
||||
this.name = 'spiderApi'
|
||||
this.version = 1.0
|
||||
this.description = 'Get your API key from the <a target="_blank" href="https://spider.cloud">Spider</a> dashboard.'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Spider API Key',
|
||||
name: 'spiderApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: SpiderApiCredential }
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TogetherAIApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'TogetherAI API'
|
||||
this.name = 'togetherAIApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'TogetherAI Api Key',
|
||||
name: 'togetherAIApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TogetherAIApi }
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class UpstashVectorApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Upstash Vector API'
|
||||
this.name = 'upstashVectorApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Upstash Vector REST URL',
|
||||
name: 'UPSTASH_VECTOR_REST_URL',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Upstash Vector REST Token',
|
||||
name: 'UPSTASH_VECTOR_REST_TOKEN',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: UpstashVectorApi }
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class VoyageAIApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Voyage AI API'
|
||||
this.name = 'voyageAIApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://docs.voyageai.com/install/#authentication-with-api-keys">official guide</a> on how to get an API Key'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Voyage AI Endpoint',
|
||||
name: 'endpoint',
|
||||
type: 'string',
|
||||
default: 'https://api.voyageai.com/v1/embeddings'
|
||||
},
|
||||
{
|
||||
label: 'Voyage AI API Key',
|
||||
name: 'apiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: VoyageAIApi }
|
||||
|
|
@ -1,6 +1,4 @@
|
|||
import gulp from 'gulp'
|
||||
|
||||
const { src, dest } = gulp
|
||||
const { src, dest } = require('gulp')
|
||||
|
||||
function copyIcons() {
|
||||
return src(['nodes/**/*.{jpg,png,svg}']).pipe(dest('dist/nodes'))
|
||||
|
|
|
|||
|
|
@ -1,11 +1,13 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
|
||||
import { AgentExecutor } from 'langchain/agents'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import axios from 'axios'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { AgentExecutor } from 'langchain/agents'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class Airtable_Agents implements INode {
|
||||
label: string
|
||||
|
|
@ -22,7 +24,7 @@ class Airtable_Agents implements INode {
|
|||
constructor() {
|
||||
this.label = 'Airtable Agent'
|
||||
this.name = 'airtableAgent'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'airtable.svg'
|
||||
|
|
@ -71,6 +73,14 @@ class Airtable_Agents implements INode {
|
|||
default: 100,
|
||||
additionalParams: true,
|
||||
description: 'Number of results to return'
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -80,12 +90,24 @@ class Airtable_Agents implements INode {
|
|||
return undefined
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const model = nodeData.inputs?.model as BaseLanguageModel
|
||||
const baseId = nodeData.inputs?.baseId as string
|
||||
const tableId = nodeData.inputs?.tableId as string
|
||||
const returnAll = nodeData.inputs?.returnAll as boolean
|
||||
const limit = nodeData.inputs?.limit as string
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the Vectara chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const accessToken = getCredentialParam('accessToken', credentialData, nodeData)
|
||||
|
|
|
|||
|
|
@ -1,13 +1,14 @@
|
|||
import { INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { BaseChatModel } from 'langchain/chat_models/base'
|
||||
import { AutoGPT } from 'langchain/experimental/autogpt'
|
||||
import { Tool } from 'langchain/tools'
|
||||
import { AIMessage, HumanMessage, SystemMessage } from 'langchain/schema'
|
||||
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
|
||||
import { flatten } from 'lodash'
|
||||
import { StructuredTool } from 'langchain/tools'
|
||||
import { Tool, StructuredTool } from '@langchain/core/tools'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'
|
||||
import { VectorStoreRetriever } from '@langchain/core/vectorstores'
|
||||
import { PromptTemplate } from '@langchain/core/prompts'
|
||||
import { AutoGPT } from 'langchain/experimental/autogpt'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { PromptTemplate } from 'langchain/prompts'
|
||||
import { INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
type ObjectTool = StructuredTool
|
||||
const FINISH_NAME = 'finish'
|
||||
|
|
@ -26,7 +27,7 @@ class AutoGPT_Agents implements INode {
|
|||
constructor() {
|
||||
this.label = 'AutoGPT'
|
||||
this.name = 'autoGPT'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'AutoGPT'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'autogpt.svg'
|
||||
|
|
@ -69,6 +70,14 @@ class AutoGPT_Agents implements INode {
|
|||
type: 'number',
|
||||
default: 5,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -93,9 +102,21 @@ class AutoGPT_Agents implements INode {
|
|||
return autogpt
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string): Promise<string | object> {
|
||||
const executor = nodeData.instance as AutoGPT
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the AutoGPT agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
let totalAssistantReply = ''
|
||||
|
|
|
|||
|
|
@ -1,7 +1,9 @@
|
|||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { VectorStore } from '@langchain/core/vectorstores'
|
||||
import { INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { BabyAGI } from './core'
|
||||
import { BaseChatModel } from 'langchain/chat_models/base'
|
||||
import { VectorStore } from 'langchain/vectorstores/base'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class BabyAGI_Agents implements INode {
|
||||
label: string
|
||||
|
|
@ -17,7 +19,7 @@ class BabyAGI_Agents implements INode {
|
|||
constructor() {
|
||||
this.label = 'BabyAGI'
|
||||
this.name = 'babyAGI'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'BabyAGI'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'babyagi.svg'
|
||||
|
|
@ -39,6 +41,14 @@ class BabyAGI_Agents implements INode {
|
|||
name: 'taskLoop',
|
||||
type: 'number',
|
||||
default: 3
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -53,8 +63,21 @@ class BabyAGI_Agents implements INode {
|
|||
return babyAgi
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string): Promise<string | object> {
|
||||
const executor = nodeData.instance as BabyAGI
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the BabyAGI agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const objective = input
|
||||
|
||||
const res = await executor.call({ objective })
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { VectorStore } from '@langchain/core/vectorstores'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
import { PromptTemplate } from '@langchain/core/prompts'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { BaseChatModel } from 'langchain/chat_models/base'
|
||||
import { VectorStore } from 'langchain/dist/vectorstores/base'
|
||||
import { Document } from 'langchain/document'
|
||||
import { PromptTemplate } from 'langchain/prompts'
|
||||
|
||||
class TaskCreationChain extends LLMChain {
|
||||
constructor(prompt: PromptTemplate, llm: BaseChatModel) {
|
||||
|
|
|
|||
|
|
@ -1,10 +1,13 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { AgentExecutor } from 'langchain/agents'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { getFileFromStorage } from '../../../src'
|
||||
|
||||
class CSV_Agents implements INode {
|
||||
label: string
|
||||
|
|
@ -20,7 +23,7 @@ class CSV_Agents implements INode {
|
|||
constructor() {
|
||||
this.label = 'CSV Agent'
|
||||
this.name = 'csvAgent'
|
||||
this.version = 1.0
|
||||
this.version = 3.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'CSVagent.svg'
|
||||
|
|
@ -47,6 +50,24 @@ class CSV_Agents implements INode {
|
|||
optional: true,
|
||||
placeholder:
|
||||
'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Custom Pandas Read_CSV Code',
|
||||
description:
|
||||
'Custom Pandas <a target="_blank" href="https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html">read_csv</a> function. Takes in an input: "csv_data"',
|
||||
name: 'customReadCSV',
|
||||
default: 'read_csv(csv_data)',
|
||||
type: 'code',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -56,29 +77,56 @@ class CSV_Agents implements INode {
|
|||
return undefined
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const csvFileBase64 = nodeData.inputs?.csvFile as string
|
||||
const model = nodeData.inputs?.model as BaseLanguageModel
|
||||
const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
const _customReadCSV = nodeData.inputs?.customReadCSV as string
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the CSV agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let files: string[] = []
|
||||
|
||||
if (csvFileBase64.startsWith('[') && csvFileBase64.endsWith(']')) {
|
||||
files = JSON.parse(csvFileBase64)
|
||||
} else {
|
||||
files = [csvFileBase64]
|
||||
}
|
||||
|
||||
let base64String = ''
|
||||
|
||||
for (const file of files) {
|
||||
const splitDataURI = file.split(',')
|
||||
splitDataURI.pop()
|
||||
base64String = splitDataURI.pop() ?? ''
|
||||
if (csvFileBase64.startsWith('FILE-STORAGE::')) {
|
||||
const fileName = csvFileBase64.replace('FILE-STORAGE::', '')
|
||||
if (fileName.startsWith('[') && fileName.endsWith(']')) {
|
||||
files = JSON.parse(fileName)
|
||||
} else {
|
||||
files = [fileName]
|
||||
}
|
||||
const chatflowid = options.chatflowid
|
||||
|
||||
for (const file of files) {
|
||||
const fileData = await getFileFromStorage(file, chatflowid)
|
||||
base64String += fileData.toString('base64')
|
||||
}
|
||||
} else {
|
||||
if (csvFileBase64.startsWith('[') && csvFileBase64.endsWith(']')) {
|
||||
files = JSON.parse(csvFileBase64)
|
||||
} else {
|
||||
files = [csvFileBase64]
|
||||
}
|
||||
|
||||
for (const file of files) {
|
||||
const splitDataURI = file.split(',')
|
||||
splitDataURI.pop()
|
||||
base64String += splitDataURI.pop() ?? ''
|
||||
}
|
||||
}
|
||||
|
||||
const pyodide = await LoadPyodide()
|
||||
|
|
@ -86,6 +134,7 @@ class CSV_Agents implements INode {
|
|||
// First load the csv file and get the dataframe dictionary of column types
|
||||
// For example using titanic.csv: {'PassengerId': 'int64', 'Survived': 'int64', 'Pclass': 'int64', 'Name': 'object', 'Sex': 'object', 'Age': 'float64', 'SibSp': 'int64', 'Parch': 'int64', 'Ticket': 'object', 'Fare': 'float64', 'Cabin': 'object', 'Embarked': 'object'}
|
||||
let dataframeColDict = ''
|
||||
let customReadCSVFunc = _customReadCSV ? _customReadCSV : 'read_csv(csv_data)'
|
||||
try {
|
||||
const code = `import pandas as pd
|
||||
import base64
|
||||
|
|
@ -98,7 +147,7 @@ decoded_data = base64.b64decode(base64_string)
|
|||
|
||||
csv_data = StringIO(decoded_data.decode('utf-8'))
|
||||
|
||||
df = pd.read_csv(csv_data)
|
||||
df = pd.${customReadCSVFunc}
|
||||
my_dict = df.dtypes.astype(str).to_dict()
|
||||
print(my_dict)
|
||||
json.dumps(my_dict)`
|
||||
|
|
|
|||
|
|
@ -1,14 +1,19 @@
|
|||
import { Tool } from 'langchain/tools'
|
||||
import { BaseChatModel } from 'langchain/chat_models/base'
|
||||
import { flatten } from 'lodash'
|
||||
import { AgentStep, BaseMessage, ChainValues, AIMessage, HumanMessage } from 'langchain/schema'
|
||||
import { RunnableSequence } from 'langchain/schema/runnable'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'
|
||||
import { ChainValues } from '@langchain/core/utils/types'
|
||||
import { AgentStep } from '@langchain/core/agents'
|
||||
import { renderTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
|
||||
import { RunnableSequence } from '@langchain/core/runnables'
|
||||
import { ChatConversationalAgent } from 'langchain/agents'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { IVisionChatModal, FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
|
||||
import { AgentExecutor } from '../../../src/agents'
|
||||
import { ChatConversationalAgent } from 'langchain/agents'
|
||||
import { renderTemplate } from '@langchain/core/prompts'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
|
||||
|
||||
|
|
@ -42,7 +47,7 @@ class ConversationalAgent_Agents implements INode {
|
|||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Conversational Agent'
|
||||
this.name = 'conversationalAgent'
|
||||
this.version = 2.0
|
||||
this.version = 3.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'agent.svg'
|
||||
|
|
@ -73,29 +78,72 @@ class ConversationalAgent_Agents implements INode {
|
|||
default: DEFAULT_PREFIX,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the BabyAGI agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
let sourceDocuments: ICommonObject[] = []
|
||||
let usedTools: IUsedTool[] = []
|
||||
|
||||
if (options.socketIO && options.socketIOClientId) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
|
|
@ -112,27 +160,37 @@ class ConversationalAgent_Agents implements INode {
|
|||
this.sessionId
|
||||
)
|
||||
|
||||
return res?.output
|
||||
let finalRes = res?.output
|
||||
|
||||
if (sourceDocuments.length || usedTools.length) {
|
||||
finalRes = { text: res?.output }
|
||||
if (sourceDocuments.length) {
|
||||
finalRes.sourceDocuments = flatten(sourceDocuments)
|
||||
}
|
||||
if (usedTools.length) {
|
||||
finalRes.usedTools = usedTools
|
||||
}
|
||||
return finalRes
|
||||
}
|
||||
|
||||
return finalRes
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = async (
|
||||
nodeData: INodeData,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string },
|
||||
chatHistory: IMessage[] = []
|
||||
options: ICommonObject,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string }
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
let tools = nodeData.inputs?.tools as Tool[]
|
||||
tools = flatten(tools)
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
|
||||
/** Bind a stop token to the model */
|
||||
const modelWithStop = model.bind({
|
||||
stop: ['\nObservation']
|
||||
})
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
const outputParser = ChatConversationalAgent.getDefaultOutputParser({
|
||||
llm: model,
|
||||
|
|
@ -144,12 +202,46 @@ const prepareAgent = async (
|
|||
outputParser
|
||||
})
|
||||
|
||||
if (llmSupportsVision(model)) {
|
||||
const visionChatModel = model as IVisionChatModal
|
||||
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
|
||||
if (messageContent?.length) {
|
||||
visionChatModel.setVisionModel()
|
||||
|
||||
// Pop the `agent_scratchpad` MessagePlaceHolder
|
||||
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
|
||||
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
|
||||
const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
|
||||
const template = (lastMessage.prompt as PromptTemplate).template as string
|
||||
const msg = HumanMessagePromptTemplate.fromTemplate([
|
||||
...messageContent,
|
||||
{
|
||||
text: template
|
||||
}
|
||||
])
|
||||
msg.inputVariables = lastMessage.inputVariables
|
||||
prompt.promptMessages.push(msg)
|
||||
}
|
||||
|
||||
// Add the `agent_scratchpad` MessagePlaceHolder back
|
||||
prompt.promptMessages.push(messagePlaceholder)
|
||||
} else {
|
||||
visionChatModel.revertToOriginalModel()
|
||||
}
|
||||
}
|
||||
|
||||
/** Bind a stop token to the model */
|
||||
const modelWithStop = model.bind({
|
||||
stop: ['\nObservation']
|
||||
})
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
|
||||
agent_scratchpad: async (i: { input: string; steps: AgentStep[] }) => await constructScratchPad(i.steps),
|
||||
[memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
|
||||
return messages ?? []
|
||||
}
|
||||
},
|
||||
|
|
@ -164,7 +256,8 @@ const prepareAgent = async (
|
|||
sessionId: flowObj?.sessionId,
|
||||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
verbose: process.env.DEBUG === 'true',
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
return executor
|
||||
|
|
|
|||
|
|
@ -1,155 +0,0 @@
|
|||
import { ChainValues, AgentStep, BaseMessage } from 'langchain/schema'
|
||||
import { flatten } from 'lodash'
|
||||
import { ChatOpenAI } from 'langchain/chat_models/openai'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
|
||||
import { formatToOpenAIFunction } from 'langchain/tools'
|
||||
import { RunnableSequence } from 'langchain/schema/runnable'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
|
||||
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
|
||||
|
||||
const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`
|
||||
|
||||
class ConversationalRetrievalAgent_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Conversational Retrieval Agent'
|
||||
this.name = 'conversationalRetrievalAgent'
|
||||
this.version = 3.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'agent.svg'
|
||||
this.description = `An agent optimized for retrieval during conversation, answering questions based on past dialogue, all using OpenAI's Function Calling`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Allowed Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'OpenAI/Azure Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
default: defaultMessage,
|
||||
rows: 4,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
|
||||
if (options.socketIO && options.socketIOClientId) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: res?.output,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
return res?.output
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = (
|
||||
nodeData: INodeData,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string },
|
||||
chatHistory: IMessage[] = []
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as ChatOpenAI
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
['ai', systemMessage ? systemMessage : defaultMessage],
|
||||
new MessagesPlaceholder(memoryKey),
|
||||
['human', `{${inputKey}}`],
|
||||
new MessagesPlaceholder('agent_scratchpad')
|
||||
])
|
||||
|
||||
const modelWithFunctions = model.bind({
|
||||
functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))]
|
||||
})
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
|
||||
agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
|
||||
[memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
|
||||
return messages ?? []
|
||||
}
|
||||
},
|
||||
prompt,
|
||||
modelWithFunctions,
|
||||
new OpenAIFunctionsAgentOutputParser()
|
||||
])
|
||||
|
||||
const executor = AgentExecutor.fromAgentAndTools({
|
||||
agent: runnableAgent,
|
||||
tools,
|
||||
sessionId: flowObj?.sessionId,
|
||||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
returnIntermediateSteps: true,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
})
|
||||
|
||||
return executor
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ConversationalRetrievalAgent_Agents }
|
||||
|
|
@ -0,0 +1 @@
|
|||
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><circle cx="16" cy="16" r="14" fill="#CC9B7A"/><path d="m10 21 4.5-10L19 21m-7.2-2.857h5.4M18.5 11 23 21" stroke="#1F1F1E" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
|
||||
|
After Width: | Height: | Size: 269 B |
|
|
@ -0,0 +1,142 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { MessageContentTextDetail, ChatMessage, AnthropicAgent, Anthropic } from 'llamaindex'
|
||||
import { getBaseClasses } from '../../../../src/utils'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../../src/Interface'
|
||||
|
||||
class AnthropicAgent_LlamaIndex_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
tags: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
badge?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Anthropic Agent'
|
||||
this.name = 'anthropicAgentLlamaIndex'
|
||||
this.version = 1.0
|
||||
this.type = 'AnthropicAgent'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'Anthropic.svg'
|
||||
this.description = `Agent that uses Anthropic Claude Function Calling to pick the tools and args to call using LlamaIndex`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AnthropicAgent)]
|
||||
this.tags = ['LlamaIndex']
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool_LlamaIndex',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'Anthropic Claude Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel_LlamaIndex'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
rows: 4,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(): Promise<any> {
|
||||
return null
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const model = nodeData.inputs?.model as Anthropic
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
|
||||
const chatHistory = [] as ChatMessage[]
|
||||
|
||||
if (systemMessage) {
|
||||
chatHistory.push({
|
||||
content: systemMessage,
|
||||
role: 'system'
|
||||
})
|
||||
}
|
||||
|
||||
const msgs = (await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]
|
||||
for (const message of msgs) {
|
||||
if (message.type === 'apiMessage') {
|
||||
chatHistory.push({
|
||||
content: message.message,
|
||||
role: 'assistant'
|
||||
})
|
||||
} else if (message.type === 'userMessage') {
|
||||
chatHistory.push({
|
||||
content: message.message,
|
||||
role: 'user'
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const agent = new AnthropicAgent({
|
||||
tools,
|
||||
llm: model,
|
||||
chatHistory: chatHistory,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
})
|
||||
|
||||
let text = ''
|
||||
const usedTools: IUsedTool[] = []
|
||||
|
||||
const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' ? true : false })
|
||||
|
||||
if (response.sources.length) {
|
||||
for (const sourceTool of response.sources) {
|
||||
usedTools.push({
|
||||
tool: sourceTool.tool?.metadata.name ?? '',
|
||||
toolInput: sourceTool.input,
|
||||
toolOutput: sourceTool.output as any
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if (Array.isArray(response.response.message.content) && response.response.message.content.length > 0) {
|
||||
text = (response.response.message.content[0] as MessageContentTextDetail).text
|
||||
} else {
|
||||
text = response.response.message.content as string
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: text,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
return usedTools.length ? { text: text, usedTools } : text
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: AnthropicAgent_LlamaIndex_Agents }
|
||||
|
|
@ -0,0 +1,167 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { ChatMessage, OpenAI, OpenAIAgent } from 'llamaindex'
|
||||
import { getBaseClasses } from '../../../../src/utils'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../../src/Interface'
|
||||
|
||||
class OpenAIFunctionAgent_LlamaIndex_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
tags: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
badge?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'OpenAI Tool Agent'
|
||||
this.name = 'openAIToolAgentLlamaIndex'
|
||||
this.version = 2.0
|
||||
this.type = 'OpenAIToolAgent'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'function.svg'
|
||||
this.description = `Agent that uses OpenAI Function Calling to pick the tools and args to call using LlamaIndex`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(OpenAIAgent)]
|
||||
this.tags = ['LlamaIndex']
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool_LlamaIndex',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'OpenAI/Azure Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel_LlamaIndex'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
rows: 4,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(): Promise<any> {
|
||||
return null
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const model = nodeData.inputs?.model as OpenAI
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
|
||||
const isStreamingEnabled = options.socketIO && options.socketIOClientId
|
||||
|
||||
const chatHistory = [] as ChatMessage[]
|
||||
|
||||
if (systemMessage) {
|
||||
chatHistory.push({
|
||||
content: systemMessage,
|
||||
role: 'system'
|
||||
})
|
||||
}
|
||||
|
||||
const msgs = (await memory.getChatMessages(this.sessionId, false)) as IMessage[]
|
||||
for (const message of msgs) {
|
||||
if (message.type === 'apiMessage') {
|
||||
chatHistory.push({
|
||||
content: message.message,
|
||||
role: 'assistant'
|
||||
})
|
||||
} else if (message.type === 'userMessage') {
|
||||
chatHistory.push({
|
||||
content: message.message,
|
||||
role: 'user'
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const agent = new OpenAIAgent({
|
||||
tools,
|
||||
llm: model,
|
||||
chatHistory: chatHistory,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
})
|
||||
|
||||
let text = ''
|
||||
let isStreamingStarted = false
|
||||
const usedTools: IUsedTool[] = []
|
||||
|
||||
if (isStreamingEnabled) {
|
||||
const stream = await agent.chat({
|
||||
message: input,
|
||||
chatHistory,
|
||||
stream: true,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
})
|
||||
for await (const chunk of stream) {
|
||||
//console.log('chunk', chunk)
|
||||
text += chunk.response.delta
|
||||
if (!isStreamingStarted) {
|
||||
isStreamingStarted = true
|
||||
options.socketIO.to(options.socketIOClientId).emit('start', chunk.response.delta)
|
||||
if (chunk.sources.length) {
|
||||
for (const sourceTool of chunk.sources) {
|
||||
usedTools.push({
|
||||
tool: sourceTool.tool?.metadata.name ?? '',
|
||||
toolInput: sourceTool.input,
|
||||
toolOutput: sourceTool.output as any
|
||||
})
|
||||
}
|
||||
options.socketIO.to(options.socketIOClientId).emit('usedTools', usedTools)
|
||||
}
|
||||
}
|
||||
|
||||
options.socketIO.to(options.socketIOClientId).emit('token', chunk.response.delta)
|
||||
}
|
||||
} else {
|
||||
const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' ? true : false })
|
||||
if (response.sources.length) {
|
||||
for (const sourceTool of response.sources) {
|
||||
usedTools.push({
|
||||
tool: sourceTool.tool?.metadata.name ?? '',
|
||||
toolInput: sourceTool.input,
|
||||
toolOutput: sourceTool.output as any
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
text = response.response.message.content as string
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: text,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
return usedTools.length ? { text: text, usedTools } : text
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: OpenAIFunctionAgent_LlamaIndex_Agents }
|
||||
|
Before Width: | Height: | Size: 2.3 KiB After Width: | Height: | Size: 2.3 KiB |
|
|
@ -1,66 +0,0 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { Tool } from 'langchain/tools'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { flatten } from 'lodash'
|
||||
import { additionalCallbacks } from '../../../src/handler'
|
||||
|
||||
class MRKLAgentChat_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'ReAct Agent for Chat Models'
|
||||
this.name = 'mrklAgentChat'
|
||||
this.version = 1.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'agent.svg'
|
||||
this.description = 'Agent that uses the ReAct logic to decide what action to take, optimized to be used with Chat Models'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Allowed Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Language Model',
|
||||
name: 'model',
|
||||
type: 'BaseLanguageModel'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData): Promise<any> {
|
||||
const model = nodeData.inputs?.model as BaseLanguageModel
|
||||
let tools = nodeData.inputs?.tools as Tool[]
|
||||
tools = flatten(tools)
|
||||
const executor = await initializeAgentExecutorWithOptions(tools, model, {
|
||||
agentType: 'chat-zero-shot-react-description',
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
})
|
||||
return executor
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
const executor = nodeData.instance as AgentExecutor
|
||||
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
const result = await executor.call({ input }, [...callbacks])
|
||||
|
||||
return result?.output
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: MRKLAgentChat_Agents }
|
||||
|
|
@ -1,67 +0,0 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
|
||||
import { Tool } from 'langchain/tools'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { flatten } from 'lodash'
|
||||
import { additionalCallbacks } from '../../../src/handler'
|
||||
|
||||
class MRKLAgentLLM_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'ReAct Agent for LLMs'
|
||||
this.name = 'mrklAgentLLM'
|
||||
this.version = 1.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'agent.svg'
|
||||
this.description = 'Agent that uses the ReAct logic to decide what action to take, optimized to be used with LLMs'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Allowed Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Language Model',
|
||||
name: 'model',
|
||||
type: 'BaseLanguageModel'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData): Promise<any> {
|
||||
const model = nodeData.inputs?.model as BaseLanguageModel
|
||||
let tools = nodeData.inputs?.tools as Tool[]
|
||||
tools = flatten(tools)
|
||||
|
||||
const executor = await initializeAgentExecutorWithOptions(tools, model, {
|
||||
agentType: 'zero-shot-react-description',
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
})
|
||||
return executor
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
const executor = nodeData.instance as AgentExecutor
|
||||
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
const result = await executor.call({ input }, [...callbacks])
|
||||
|
||||
return result?.output
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: MRKLAgentLLM_Agents }
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M10 6C10 5.44772 10.4477 5 11 5H21C21.5523 5 22 5.44772 22 6V11C22 13.2091 20.2091 15 18 15H14C11.7909 15 10 13.2091 10 11V6Z" stroke="black" stroke-width="2" stroke-linejoin="round"/>
|
||||
<path d="M16 5V3" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
<circle cx="14" cy="9" r="1.5" fill="black"/>
|
||||
<circle cx="18" cy="9" r="1.5" fill="black"/>
|
||||
<path d="M26 27C26 22.0294 21.5228 18 16 18C10.4772 18 6 22.0294 6 27" stroke="black" stroke-width="2" stroke-linecap="round"/>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 616 B |
|
|
@ -1,16 +1,17 @@
|
|||
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOptionsValue, INodeParams, IUsedTool } from '../../../src/Interface'
|
||||
import OpenAI from 'openai'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { getCredentialData, getCredentialParam, getUserHome } from '../../../src/utils'
|
||||
import { MessageContentImageFile, MessageContentText } from 'openai/resources/beta/threads/messages/messages'
|
||||
import * as fsDefault from 'node:fs'
|
||||
import * as path from 'node:path'
|
||||
import { getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import fetch from 'node-fetch'
|
||||
import { flatten, uniqWith, isEqual } from 'lodash'
|
||||
import { zodToJsonSchema } from 'zod-to-json-schema'
|
||||
import { AnalyticHandler } from '../../../src/handler'
|
||||
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { addSingleFileToStorage } from '../../../src/storageUtils'
|
||||
|
||||
const lenticularBracketRegex = /【[^】]*】/g
|
||||
const imageRegex = /<img[^>]*\/>/g
|
||||
|
||||
class OpenAIAssistant_Agents implements INode {
|
||||
label: string
|
||||
|
|
@ -26,7 +27,7 @@ class OpenAIAssistant_Agents implements INode {
|
|||
constructor() {
|
||||
this.label = 'OpenAI Assistant'
|
||||
this.name = 'openAIAssistant'
|
||||
this.version = 3.0
|
||||
this.version = 4.0
|
||||
this.type = 'OpenAIAssistant'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'assistant.svg'
|
||||
|
|
@ -53,6 +54,25 @@ class OpenAIAssistant_Agents implements INode {
|
|||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Tool Choice',
|
||||
name: 'toolChoice',
|
||||
type: 'string',
|
||||
description:
|
||||
'Controls which (if any) tool is called by the model. Can be "none", "auto", "required", or the name of a tool. Refer <a href="https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-tool_choice" target="_blank">here</a> for more information',
|
||||
placeholder: 'file_search',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Parallel Tool Calls',
|
||||
name: 'parallelToolCalls',
|
||||
type: 'boolean',
|
||||
description: 'Whether to enable parallel function calling during tool use. Defaults to true',
|
||||
default: true,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Disable File Download',
|
||||
name: 'disableFileDownload',
|
||||
|
|
@ -137,10 +157,14 @@ class OpenAIAssistant_Agents implements INode {
|
|||
const openai = new OpenAI({ apiKey: openAIApiKey })
|
||||
options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
|
||||
try {
|
||||
if (sessionId) await openai.beta.threads.del(sessionId)
|
||||
options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
|
||||
if (sessionId && sessionId.startsWith('thread_')) {
|
||||
await openai.beta.threads.del(sessionId)
|
||||
options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
|
||||
} else {
|
||||
options.logger.error(`Error clearing OpenAI Thread ${sessionId}`)
|
||||
}
|
||||
} catch (e) {
|
||||
throw new Error(e)
|
||||
options.logger.error(`Error clearing OpenAI Thread ${sessionId}`)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -150,6 +174,8 @@ class OpenAIAssistant_Agents implements INode {
|
|||
const databaseEntities = options.databaseEntities as IDatabaseEntity
|
||||
const disableFileDownload = nodeData.inputs?.disableFileDownload as boolean
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
const _toolChoice = nodeData.inputs?.toolChoice as string
|
||||
const parallelToolCalls = nodeData.inputs?.parallelToolCalls as boolean
|
||||
const isStreaming = options.socketIO && options.socketIOClientId
|
||||
const socketIO = isStreaming ? options.socketIO : undefined
|
||||
const socketIOClientId = isStreaming ? options.socketIOClientId : ''
|
||||
|
|
@ -168,6 +194,9 @@ class OpenAIAssistant_Agents implements INode {
|
|||
tools = flatten(tools)
|
||||
const formattedTools = tools?.map((tool: any) => formatToOpenAIAssistantTool(tool)) ?? []
|
||||
|
||||
const usedTools: IUsedTool[] = []
|
||||
const fileAnnotations = []
|
||||
|
||||
const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
|
||||
id: selectedAssistantId
|
||||
})
|
||||
|
|
@ -195,7 +224,7 @@ class OpenAIAssistant_Agents implements INode {
|
|||
if (formattedTools.length) {
|
||||
let filteredTools = []
|
||||
for (const tool of retrievedAssistant.tools) {
|
||||
if (tool.type === 'code_interpreter' || tool.type === 'retrieval') filteredTools.push(tool)
|
||||
if (tool.type === 'code_interpreter' || tool.type === 'file_search') filteredTools.push(tool)
|
||||
}
|
||||
filteredTools = uniqWith([...filteredTools, ...formattedTools], isEqual)
|
||||
// filter out tool with empty function
|
||||
|
|
@ -236,7 +265,8 @@ class OpenAIAssistant_Agents implements INode {
|
|||
(runStatus === 'cancelled' ||
|
||||
runStatus === 'completed' ||
|
||||
runStatus === 'expired' ||
|
||||
runStatus === 'failed')
|
||||
runStatus === 'failed' ||
|
||||
runStatus === 'requires_action')
|
||||
) {
|
||||
clearInterval(timeout)
|
||||
resolve()
|
||||
|
|
@ -259,11 +289,256 @@ class OpenAIAssistant_Agents implements INode {
|
|||
|
||||
// Run assistant thread
|
||||
const llmIds = await analyticHandlers.onLLMStart('ChatOpenAI', input, parentIds)
|
||||
const runThread = await openai.beta.threads.runs.create(threadId, {
|
||||
assistant_id: retrievedAssistant.id
|
||||
})
|
||||
|
||||
const usedTools: IUsedTool[] = []
|
||||
let text = ''
|
||||
let runThreadId = ''
|
||||
let isStreamingStarted = false
|
||||
|
||||
let toolChoice: any
|
||||
if (_toolChoice) {
|
||||
if (_toolChoice === 'file_search') {
|
||||
toolChoice = { type: 'file_search' }
|
||||
} else if (_toolChoice === 'code_interpreter') {
|
||||
toolChoice = { type: 'code_interpreter' }
|
||||
} else if (_toolChoice === 'none' || _toolChoice === 'auto' || _toolChoice === 'required') {
|
||||
toolChoice = _toolChoice
|
||||
} else {
|
||||
toolChoice = { type: 'function', function: { name: _toolChoice } }
|
||||
}
|
||||
}
|
||||
|
||||
if (isStreaming) {
|
||||
const streamThread = await openai.beta.threads.runs.create(threadId, {
|
||||
assistant_id: retrievedAssistant.id,
|
||||
stream: true,
|
||||
tool_choice: toolChoice,
|
||||
parallel_tool_calls: parallelToolCalls
|
||||
})
|
||||
|
||||
for await (const event of streamThread) {
|
||||
if (event.event === 'thread.run.created') {
|
||||
runThreadId = event.data.id
|
||||
}
|
||||
|
||||
if (event.event === 'thread.message.delta') {
|
||||
const chunk = event.data.delta.content?.[0]
|
||||
|
||||
if (chunk && 'text' in chunk) {
|
||||
if (chunk.text?.annotations?.length) {
|
||||
const message_content = chunk.text
|
||||
const annotations = chunk.text?.annotations
|
||||
|
||||
// Iterate over the annotations
|
||||
for (let index = 0; index < annotations.length; index++) {
|
||||
const annotation = annotations[index]
|
||||
let filePath = ''
|
||||
|
||||
// Gather citations based on annotation attributes
|
||||
const file_citation = (annotation as OpenAI.Beta.Threads.Messages.FileCitationAnnotation).file_citation
|
||||
if (file_citation) {
|
||||
const cited_file = await openai.files.retrieve(file_citation.file_id)
|
||||
// eslint-disable-next-line no-useless-escape
|
||||
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
|
||||
if (!disableFileDownload) {
|
||||
filePath = await downloadFile(
|
||||
openAIApiKey,
|
||||
cited_file,
|
||||
fileName,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
fileAnnotations.push({
|
||||
filePath,
|
||||
fileName
|
||||
})
|
||||
}
|
||||
} else {
|
||||
const file_path = (annotation as OpenAI.Beta.Threads.Messages.FilePathAnnotation).file_path
|
||||
if (file_path) {
|
||||
const cited_file = await openai.files.retrieve(file_path.file_id)
|
||||
// eslint-disable-next-line no-useless-escape
|
||||
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
|
||||
if (!disableFileDownload) {
|
||||
filePath = await downloadFile(
|
||||
openAIApiKey,
|
||||
cited_file,
|
||||
fileName,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
fileAnnotations.push({
|
||||
filePath,
|
||||
fileName
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Replace the text with a footnote
|
||||
message_content.value = message_content.value?.replace(
|
||||
`${annotation.text}`,
|
||||
`${disableFileDownload ? '' : filePath}`
|
||||
)
|
||||
}
|
||||
|
||||
// Remove lenticular brackets
|
||||
message_content.value = message_content.value?.replace(lenticularBracketRegex, '')
|
||||
|
||||
text += message_content.value ?? ''
|
||||
|
||||
if (message_content.value) {
|
||||
if (!isStreamingStarted) {
|
||||
isStreamingStarted = true
|
||||
socketIO.to(socketIOClientId).emit('start', message_content.value)
|
||||
}
|
||||
socketIO.to(socketIOClientId).emit('token', message_content.value)
|
||||
}
|
||||
|
||||
if (fileAnnotations.length) {
|
||||
if (!isStreamingStarted) {
|
||||
isStreamingStarted = true
|
||||
socketIO.to(socketIOClientId).emit('start', '')
|
||||
}
|
||||
socketIO.to(socketIOClientId).emit('fileAnnotations', fileAnnotations)
|
||||
}
|
||||
} else {
|
||||
text += chunk.text?.value
|
||||
if (!isStreamingStarted) {
|
||||
isStreamingStarted = true
|
||||
socketIO.to(socketIOClientId).emit('start', chunk.text?.value)
|
||||
}
|
||||
|
||||
socketIO.to(socketIOClientId).emit('token', chunk.text?.value)
|
||||
}
|
||||
}
|
||||
|
||||
if (chunk && 'image_file' in chunk && chunk.image_file?.file_id) {
|
||||
const fileId = chunk.image_file.file_id
|
||||
const fileObj = await openai.files.retrieve(fileId)
|
||||
|
||||
const buffer = await downloadImg(openai, fileId, `${fileObj.filename}.png`, options.chatflowid, options.chatId)
|
||||
const base64String = Buffer.from(buffer).toString('base64')
|
||||
|
||||
// TODO: Use a file path and retrieve image on the fly. Storing as base64 to localStorage and database will easily hit limits
|
||||
const imgHTML = `<img src="data:image/png;base64,${base64String}" width="100%" height="max-content" alt="${fileObj.filename}" /><br/>`
|
||||
text += imgHTML
|
||||
|
||||
if (!isStreamingStarted) {
|
||||
isStreamingStarted = true
|
||||
socketIO.to(socketIOClientId).emit('start', imgHTML)
|
||||
}
|
||||
|
||||
socketIO.to(socketIOClientId).emit('token', imgHTML)
|
||||
}
|
||||
}
|
||||
|
||||
if (event.event === 'thread.run.requires_action') {
|
||||
if (event.data.required_action?.submit_tool_outputs.tool_calls) {
|
||||
const actions: ICommonObject[] = []
|
||||
event.data.required_action.submit_tool_outputs.tool_calls.forEach((item) => {
|
||||
const functionCall = item.function
|
||||
let args = {}
|
||||
try {
|
||||
args = JSON.parse(functionCall.arguments)
|
||||
} catch (e) {
|
||||
console.error('Error parsing arguments, default to empty object')
|
||||
}
|
||||
actions.push({
|
||||
tool: functionCall.name,
|
||||
toolInput: args,
|
||||
toolCallId: item.id
|
||||
})
|
||||
})
|
||||
|
||||
const submitToolOutputs = []
|
||||
for (let i = 0; i < actions.length; i += 1) {
|
||||
const tool = tools.find((tool: any) => tool.name === actions[i].tool)
|
||||
if (!tool) continue
|
||||
|
||||
// Start tool analytics
|
||||
const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds)
|
||||
|
||||
try {
|
||||
const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
|
||||
sessionId: threadId,
|
||||
chatId: options.chatId,
|
||||
input
|
||||
})
|
||||
await analyticHandlers.onToolEnd(toolIds, toolOutput)
|
||||
submitToolOutputs.push({
|
||||
tool_call_id: actions[i].toolCallId,
|
||||
output: toolOutput
|
||||
})
|
||||
usedTools.push({
|
||||
tool: tool.name,
|
||||
toolInput: actions[i].toolInput,
|
||||
toolOutput
|
||||
})
|
||||
} catch (e) {
|
||||
await analyticHandlers.onToolEnd(toolIds, e)
|
||||
console.error('Error executing tool', e)
|
||||
throw new Error(
|
||||
`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const stream = openai.beta.threads.runs.submitToolOutputsStream(threadId, runThreadId, {
|
||||
tool_outputs: submitToolOutputs
|
||||
})
|
||||
|
||||
for await (const event of stream) {
|
||||
if (event.event === 'thread.message.delta') {
|
||||
const chunk = event.data.delta.content?.[0]
|
||||
if (chunk && 'text' in chunk && chunk.text?.value) {
|
||||
text += chunk.text.value
|
||||
if (!isStreamingStarted) {
|
||||
isStreamingStarted = true
|
||||
socketIO.to(socketIOClientId).emit('start', chunk.text.value)
|
||||
}
|
||||
|
||||
socketIO.to(socketIOClientId).emit('token', chunk.text.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
socketIO.to(socketIOClientId).emit('usedTools', usedTools)
|
||||
} catch (error) {
|
||||
console.error('Error submitting tool outputs:', error)
|
||||
await openai.beta.threads.runs.cancel(threadId, runThreadId)
|
||||
|
||||
const errMsg = `Error submitting tool outputs. Thread ID: ${threadId}. Run ID: ${runThreadId}`
|
||||
|
||||
await analyticHandlers.onLLMError(llmIds, errMsg)
|
||||
await analyticHandlers.onChainError(parentIds, errMsg, true)
|
||||
|
||||
throw new Error(errMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// List messages
|
||||
const messages = await openai.beta.threads.messages.list(threadId)
|
||||
const messageData = messages.data ?? []
|
||||
const assistantMessages = messageData.filter((msg) => msg.role === 'assistant')
|
||||
if (!assistantMessages.length) return ''
|
||||
|
||||
// Remove images from the logging text
|
||||
let llmOutput = text.replace(imageRegex, '')
|
||||
llmOutput = llmOutput.replace('<br/>', '')
|
||||
|
||||
await analyticHandlers.onLLMEnd(llmIds, llmOutput)
|
||||
await analyticHandlers.onChainEnd(parentIds, messageData, true)
|
||||
|
||||
return {
|
||||
text,
|
||||
usedTools,
|
||||
fileAnnotations,
|
||||
assistant: { assistantId: openAIAssistantId, threadId, runId: runThreadId, messages: messageData }
|
||||
}
|
||||
}
|
||||
|
||||
const promise = (threadId: string, runId: string) => {
|
||||
return new Promise((resolve, reject) => {
|
||||
|
|
@ -299,8 +574,7 @@ class OpenAIAssistant_Agents implements INode {
|
|||
|
||||
// Start tool analytics
|
||||
const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds)
|
||||
if (options.socketIO && options.socketIOClientId)
|
||||
options.socketIO.to(options.socketIOClientId).emit('tool', tool.name)
|
||||
if (socketIO && socketIOClientId) socketIO.to(socketIOClientId).emit('tool', tool.name)
|
||||
|
||||
try {
|
||||
const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
|
||||
|
|
@ -360,7 +634,12 @@ class OpenAIAssistant_Agents implements INode {
|
|||
}
|
||||
|
||||
// Polling run status
|
||||
let runThreadId = runThread.id
|
||||
const runThread = await openai.beta.threads.runs.create(threadId, {
|
||||
assistant_id: retrievedAssistant.id,
|
||||
tool_choice: toolChoice,
|
||||
parallel_tool_calls: parallelToolCalls
|
||||
})
|
||||
runThreadId = runThread.id
|
||||
let state = await promise(threadId, runThread.id)
|
||||
while (state === 'requires_action') {
|
||||
state = await promise(threadId, runThread.id)
|
||||
|
|
@ -371,7 +650,9 @@ class OpenAIAssistant_Agents implements INode {
|
|||
if (retries > 0) {
|
||||
retries -= 1
|
||||
const newRunThread = await openai.beta.threads.runs.create(threadId, {
|
||||
assistant_id: retrievedAssistant.id
|
||||
assistant_id: retrievedAssistant.id,
|
||||
tool_choice: toolChoice,
|
||||
parallel_tool_calls: parallelToolCalls
|
||||
})
|
||||
runThreadId = newRunThread.id
|
||||
state = await promise(threadId, newRunThread.id)
|
||||
|
|
@ -389,46 +670,47 @@ class OpenAIAssistant_Agents implements INode {
|
|||
if (!assistantMessages.length) return ''
|
||||
|
||||
let returnVal = ''
|
||||
const fileAnnotations = []
|
||||
for (let i = 0; i < assistantMessages[0].content.length; i += 1) {
|
||||
if (assistantMessages[0].content[i].type === 'text') {
|
||||
const content = assistantMessages[0].content[i] as MessageContentText
|
||||
const content = assistantMessages[0].content[i] as OpenAI.Beta.Threads.Messages.TextContentBlock
|
||||
|
||||
if (content.text.annotations) {
|
||||
const message_content = content.text
|
||||
const annotations = message_content.annotations
|
||||
|
||||
const dirPath = path.join(getUserHome(), '.flowise', 'openai-assistant')
|
||||
|
||||
// Iterate over the annotations
|
||||
for (let index = 0; index < annotations.length; index++) {
|
||||
const annotation = annotations[index]
|
||||
let filePath = ''
|
||||
|
||||
// Gather citations based on annotation attributes
|
||||
const file_citation = (annotation as OpenAI.Beta.Threads.Messages.MessageContentText.Text.FileCitation)
|
||||
.file_citation
|
||||
const file_citation = (annotation as OpenAI.Beta.Threads.Messages.FileCitationAnnotation).file_citation
|
||||
|
||||
if (file_citation) {
|
||||
const cited_file = await openai.files.retrieve(file_citation.file_id)
|
||||
// eslint-disable-next-line no-useless-escape
|
||||
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
|
||||
filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', fileName)
|
||||
if (!disableFileDownload) {
|
||||
await downloadFile(cited_file, filePath, dirPath, openAIApiKey)
|
||||
filePath = await downloadFile(openAIApiKey, cited_file, fileName, options.chatflowid, options.chatId)
|
||||
fileAnnotations.push({
|
||||
filePath,
|
||||
fileName
|
||||
})
|
||||
}
|
||||
} else {
|
||||
const file_path = (annotation as OpenAI.Beta.Threads.Messages.MessageContentText.Text.FilePath).file_path
|
||||
const file_path = (annotation as OpenAI.Beta.Threads.Messages.FilePathAnnotation).file_path
|
||||
if (file_path) {
|
||||
const cited_file = await openai.files.retrieve(file_path.file_id)
|
||||
// eslint-disable-next-line no-useless-escape
|
||||
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
|
||||
filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', fileName)
|
||||
if (!disableFileDownload) {
|
||||
await downloadFile(cited_file, filePath, dirPath, openAIApiKey)
|
||||
filePath = await downloadFile(
|
||||
openAIApiKey,
|
||||
cited_file,
|
||||
fileName,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
fileAnnotations.push({
|
||||
filePath,
|
||||
fileName
|
||||
|
|
@ -449,19 +731,14 @@ class OpenAIAssistant_Agents implements INode {
|
|||
returnVal += content.text.value
|
||||
}
|
||||
|
||||
const lenticularBracketRegex = /【[^】]*】/g
|
||||
returnVal = returnVal.replace(lenticularBracketRegex, '')
|
||||
} else {
|
||||
const content = assistantMessages[0].content[i] as MessageContentImageFile
|
||||
const content = assistantMessages[0].content[i] as OpenAI.Beta.Threads.Messages.ImageFileContentBlock
|
||||
const fileId = content.image_file.file_id
|
||||
const fileObj = await openai.files.retrieve(fileId)
|
||||
const dirPath = path.join(getUserHome(), '.flowise', 'openai-assistant')
|
||||
const filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', `${fileObj.filename}.png`)
|
||||
|
||||
await downloadImg(openai, fileId, filePath, dirPath)
|
||||
|
||||
const bitmap = fsDefault.readFileSync(filePath)
|
||||
const base64String = Buffer.from(bitmap).toString('base64')
|
||||
const buffer = await downloadImg(openai, fileId, `${fileObj.filename}.png`, options.chatflowid, options.chatId)
|
||||
const base64String = Buffer.from(buffer).toString('base64')
|
||||
|
||||
// TODO: Use a file path and retrieve image on the fly. Storing as base64 to localStorage and database will easily hit limits
|
||||
const imgHTML = `<img src="data:image/png;base64,${base64String}" width="100%" height="max-content" alt="${fileObj.filename}" /><br/>`
|
||||
|
|
@ -469,7 +746,6 @@ class OpenAIAssistant_Agents implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const imageRegex = /<img[^>]*\/>/g
|
||||
let llmOutput = returnVal.replace(imageRegex, '')
|
||||
llmOutput = llmOutput.replace('<br/>', '')
|
||||
|
||||
|
|
@ -489,7 +765,7 @@ class OpenAIAssistant_Agents implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const downloadImg = async (openai: OpenAI, fileId: string, filePath: string, dirPath: string) => {
|
||||
const downloadImg = async (openai: OpenAI, fileId: string, fileName: string, ...paths: string[]) => {
|
||||
const response = await openai.files.content(fileId)
|
||||
|
||||
// Extract the binary data from the Response object
|
||||
|
|
@ -497,15 +773,14 @@ const downloadImg = async (openai: OpenAI, fileId: string, filePath: string, dir
|
|||
|
||||
// Convert the binary data to a Buffer
|
||||
const image_data_buffer = Buffer.from(image_data)
|
||||
const mime = 'image/png'
|
||||
|
||||
// Save the image to a specific location
|
||||
if (!fsDefault.existsSync(dirPath)) {
|
||||
fsDefault.mkdirSync(path.dirname(filePath), { recursive: true })
|
||||
}
|
||||
fsDefault.writeFileSync(filePath, image_data_buffer)
|
||||
await addSingleFileToStorage(mime, image_data_buffer, fileName, ...paths)
|
||||
|
||||
return image_data_buffer
|
||||
}
|
||||
|
||||
const downloadFile = async (fileObj: any, filePath: string, dirPath: string, openAIApiKey: string) => {
|
||||
const downloadFile = async (openAIApiKey: string, fileObj: any, fileName: string, ...paths: string[]) => {
|
||||
try {
|
||||
const response = await fetch(`https://api.openai.com/v1/files/${fileObj.id}/content`, {
|
||||
method: 'GET',
|
||||
|
|
@ -516,24 +791,21 @@ const downloadFile = async (fileObj: any, filePath: string, dirPath: string, ope
|
|||
throw new Error(`HTTP error! status: ${response.status}`)
|
||||
}
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
if (!fsDefault.existsSync(dirPath)) {
|
||||
fsDefault.mkdirSync(path.dirname(filePath), { recursive: true })
|
||||
}
|
||||
const dest = fsDefault.createWriteStream(filePath)
|
||||
response.body.pipe(dest)
|
||||
response.body.on('end', () => resolve())
|
||||
dest.on('error', reject)
|
||||
})
|
||||
// Extract the binary data from the Response object
|
||||
const data = await response.arrayBuffer()
|
||||
|
||||
// eslint-disable-next-line no-console
|
||||
console.log('File downloaded and written to', filePath)
|
||||
// Convert the binary data to a Buffer
|
||||
const data_buffer = Buffer.from(data)
|
||||
const mime = 'application/octet-stream'
|
||||
|
||||
return await addSingleFileToStorage(mime, data_buffer, fileName, ...paths)
|
||||
} catch (error) {
|
||||
console.error('Error downloading or writing the file:', error)
|
||||
return ''
|
||||
}
|
||||
}
|
||||
|
||||
const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.AssistantCreateParams.AssistantToolsFunction => {
|
||||
const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.FunctionTool => {
|
||||
return {
|
||||
type: 'function',
|
||||
function: {
|
||||
|
|
|
|||
|
|
@ -1,151 +0,0 @@
|
|||
import { ChainValues, AgentStep, BaseMessage } from 'langchain/schema'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { flatten } from 'lodash'
|
||||
import { RunnableSequence } from 'langchain/schema/runnable'
|
||||
import { formatToOpenAIFunction } from 'langchain/tools'
|
||||
import { ChatOpenAI } from 'langchain/chat_models/openai'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
|
||||
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
|
||||
import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
|
||||
|
||||
class OpenAIFunctionAgent_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'OpenAI Function Agent'
|
||||
this.name = 'openAIFunctionAgent'
|
||||
this.version = 3.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'function.svg'
|
||||
this.description = `An agent that uses Function Calling to pick the tool and args to call`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Allowed Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'OpenAI/Azure Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
rows: 4,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
|
||||
if (options.socketIO && options.socketIOClientId) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: res?.output,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
return res?.output
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = (
|
||||
nodeData: INodeData,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string },
|
||||
chatHistory: IMessage[] = []
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as ChatOpenAI
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
|
||||
new MessagesPlaceholder(memoryKey),
|
||||
['human', `{${inputKey}}`],
|
||||
new MessagesPlaceholder('agent_scratchpad')
|
||||
])
|
||||
|
||||
const modelWithFunctions = model.bind({
|
||||
functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))]
|
||||
})
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
|
||||
agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
|
||||
[memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
|
||||
return messages ?? []
|
||||
}
|
||||
},
|
||||
prompt,
|
||||
modelWithFunctions,
|
||||
new OpenAIFunctionsAgentOutputParser()
|
||||
])
|
||||
|
||||
const executor = AgentExecutor.fromAgentAndTools({
|
||||
agent: runnableAgent,
|
||||
tools,
|
||||
sessionId: flowObj?.sessionId,
|
||||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false
|
||||
})
|
||||
|
||||
return executor
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: OpenAIFunctionAgent_Agents }
|
||||
|
|
@ -0,0 +1,161 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { AgentExecutor } from 'langchain/agents'
|
||||
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
import type { PromptTemplate } from '@langchain/core/prompts'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { pull } from 'langchain/hub'
|
||||
import { additionalCallbacks } from '../../../src/handler'
|
||||
import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { createReactAgent } from '../../../src/agents'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class ReActAgentChat_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'ReAct Agent for Chat Models'
|
||||
this.name = 'reactAgentChat'
|
||||
this.version = 4.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'agent.svg'
|
||||
this.description = 'Agent that uses the ReAct logic to decide what action to take, optimized to be used with Chat Models'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Allowed Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel'
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(): Promise<any> {
|
||||
return null
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
let tools = nodeData.inputs?.tools as Tool[]
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the ReAct Agent for Chat Models
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
tools = flatten(tools)
|
||||
|
||||
const prompt = await pull<PromptTemplate>('hwchase17/react-chat')
|
||||
let chatPromptTemplate = undefined
|
||||
|
||||
if (llmSupportsVision(model)) {
|
||||
const visionChatModel = model as IVisionChatModal
|
||||
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
|
||||
if (messageContent?.length) {
|
||||
// Change model to vision supported
|
||||
visionChatModel.setVisionModel()
|
||||
const oldTemplate = prompt.template as string
|
||||
|
||||
const msg = HumanMessagePromptTemplate.fromTemplate([
|
||||
...messageContent,
|
||||
{
|
||||
text: oldTemplate
|
||||
}
|
||||
])
|
||||
msg.inputVariables = prompt.inputVariables
|
||||
chatPromptTemplate = ChatPromptTemplate.fromMessages([msg])
|
||||
} else {
|
||||
// revert to previous values if image upload is empty
|
||||
visionChatModel.revertToOriginalModel()
|
||||
}
|
||||
}
|
||||
|
||||
const agent = await createReactAgent({
|
||||
llm: model,
|
||||
tools,
|
||||
prompt: chatPromptTemplate ?? prompt
|
||||
})
|
||||
|
||||
const executor = new AgentExecutor({
|
||||
agent,
|
||||
tools,
|
||||
verbose: process.env.DEBUG === 'true',
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
const chatHistory = ((await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]) ?? []
|
||||
const chatHistoryString = chatHistory.map((hist) => hist.message).join('\\n')
|
||||
|
||||
const result = await executor.invoke({ input, chat_history: chatHistoryString }, { callbacks })
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: result?.output,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
return result?.output
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ReActAgentChat_Agents }
|
||||
|
Before Width: | Height: | Size: 616 B After Width: | Height: | Size: 616 B |
|
|
@ -0,0 +1,110 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { AgentExecutor } from 'langchain/agents'
|
||||
import { pull } from 'langchain/hub'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
import type { PromptTemplate } from '@langchain/core/prompts'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { additionalCallbacks } from '../../../src/handler'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { createReactAgent } from '../../../src/agents'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class ReActAgentLLM_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'ReAct Agent for LLMs'
|
||||
this.name = 'reactAgentLLM'
|
||||
this.version = 2.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'agent.svg'
|
||||
this.description = 'Agent that uses the ReAct logic to decide what action to take, optimized to be used with LLMs'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Allowed Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Language Model',
|
||||
name: 'model',
|
||||
type: 'BaseLanguageModel'
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(): Promise<any> {
|
||||
return null
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const model = nodeData.inputs?.model as BaseLanguageModel
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
let tools = nodeData.inputs?.tools as Tool[]
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the ReAct Agent for LLMs
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
tools = flatten(tools)
|
||||
|
||||
const prompt = await pull<PromptTemplate>('hwchase17/react')
|
||||
|
||||
const agent = await createReactAgent({
|
||||
llm: model,
|
||||
tools,
|
||||
prompt
|
||||
})
|
||||
|
||||
const executor = new AgentExecutor({
|
||||
agent,
|
||||
tools,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false,
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
const result = await executor.invoke({ input }, { callbacks })
|
||||
|
||||
return result?.output
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ReActAgentLLM_Agents }
|
||||
|
Before Width: | Height: | Size: 616 B After Width: | Height: | Size: 616 B |
|
|
@ -0,0 +1,264 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { BaseMessage } from '@langchain/core/messages'
|
||||
import { ChainValues } from '@langchain/core/utils/types'
|
||||
import { RunnableSequence } from '@langchain/core/runnables'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
|
||||
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
|
||||
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool, IVisionChatModal } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents'
|
||||
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
|
||||
class ToolAgent_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
badge?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Tool Agent'
|
||||
this.name = 'toolAgent'
|
||||
this.version = 1.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'toolAgent.png'
|
||||
this.description = `Agent that uses Function Calling to pick the tools and args to call`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'Tool Calling Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel',
|
||||
description:
|
||||
'Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
default: `You are a helpful AI assistant.`,
|
||||
rows: 4,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
const isStreamable = options.socketIO && options.socketIOClientId
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the OpenAI Function Agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
if (isStreamable)
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
let sourceDocuments: ICommonObject[] = []
|
||||
let usedTools: IUsedTool[] = []
|
||||
|
||||
if (isStreamable) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
}
|
||||
|
||||
let output = res?.output as string
|
||||
|
||||
// Claude 3 Opus tends to spit out <thinking>..</thinking> as well, discard that in final output
|
||||
const regexPattern: RegExp = /<thinking>[\s\S]*?<\/thinking>/
|
||||
const matches: RegExpMatchArray | null = output.match(regexPattern)
|
||||
if (matches) {
|
||||
for (const match of matches) {
|
||||
output = output.replace(match, '')
|
||||
}
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: output,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
let finalRes = output
|
||||
|
||||
if (sourceDocuments.length || usedTools.length) {
|
||||
const finalRes: ICommonObject = { text: output }
|
||||
if (sourceDocuments.length) {
|
||||
finalRes.sourceDocuments = flatten(sourceDocuments)
|
||||
}
|
||||
if (usedTools.length) {
|
||||
finalRes.usedTools = usedTools
|
||||
}
|
||||
return finalRes
|
||||
}
|
||||
|
||||
return finalRes
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = async (
|
||||
nodeData: INodeData,
|
||||
options: ICommonObject,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string }
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemMessage],
|
||||
new MessagesPlaceholder(memoryKey),
|
||||
['human', `{${inputKey}}`],
|
||||
new MessagesPlaceholder('agent_scratchpad')
|
||||
])
|
||||
|
||||
if (llmSupportsVision(model)) {
|
||||
const visionChatModel = model as IVisionChatModal
|
||||
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
|
||||
if (messageContent?.length) {
|
||||
visionChatModel.setVisionModel()
|
||||
|
||||
// Pop the `agent_scratchpad` MessagePlaceHolder
|
||||
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
|
||||
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
|
||||
const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
|
||||
const template = (lastMessage.prompt as PromptTemplate).template as string
|
||||
const msg = HumanMessagePromptTemplate.fromTemplate([
|
||||
...messageContent,
|
||||
{
|
||||
text: template
|
||||
}
|
||||
])
|
||||
msg.inputVariables = lastMessage.inputVariables
|
||||
prompt.promptMessages.push(msg)
|
||||
}
|
||||
|
||||
// Add the `agent_scratchpad` MessagePlaceHolder back
|
||||
prompt.promptMessages.push(messagePlaceholder)
|
||||
} else {
|
||||
visionChatModel.revertToOriginalModel()
|
||||
}
|
||||
}
|
||||
|
||||
if (model.bindTools === undefined) {
|
||||
throw new Error(`This agent requires that the "bindTools()" method be implemented on the input model.`)
|
||||
}
|
||||
|
||||
const modelWithTools = model.bindTools(tools)
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
|
||||
agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(i.steps),
|
||||
[memoryKey]: async (_: { input: string; steps: ToolsAgentStep[] }) => {
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true, prependMessages)) as BaseMessage[]
|
||||
return messages ?? []
|
||||
}
|
||||
},
|
||||
prompt,
|
||||
modelWithTools,
|
||||
new ToolCallingAgentOutputParser()
|
||||
])
|
||||
|
||||
const executor = AgentExecutor.fromAgentAndTools({
|
||||
agent: runnableAgent,
|
||||
tools,
|
||||
sessionId: flowObj?.sessionId,
|
||||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false,
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
return executor
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ToolAgent_Agents }
|
||||
|
After Width: | Height: | Size: 17 KiB |
|
|
@ -0,0 +1,255 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { ChainValues } from '@langchain/core/utils/types'
|
||||
import { AgentStep } from '@langchain/core/agents'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { RunnableSequence } from '@langchain/core/runnables'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
|
||||
import { formatLogToMessage } from 'langchain/agents/format_scratchpad/log_to_message'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { AgentExecutor, XMLAgentOutputParser } from '../../../src/agents'
|
||||
import { Moderation, checkInputs } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
const defaultSystemMessage = `You are a helpful assistant. Help the user answer any questions.
|
||||
|
||||
You have access to the following tools:
|
||||
|
||||
{tools}
|
||||
|
||||
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
|
||||
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
|
||||
|
||||
<tool>search</tool><tool_input>weather in SF</tool_input>
|
||||
<observation>64 degrees</observation>
|
||||
|
||||
When you are done, respond with a final answer between <final_answer></final_answer>. For example:
|
||||
|
||||
<final_answer>The weather in SF is 64 degrees</final_answer>
|
||||
|
||||
Begin!
|
||||
|
||||
Previous Conversation:
|
||||
{chat_history}
|
||||
|
||||
Question: {input}
|
||||
{agent_scratchpad}`
|
||||
|
||||
class XMLAgent_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
badge?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'XML Agent'
|
||||
this.name = 'xmlAgent'
|
||||
this.version = 2.0
|
||||
this.type = 'XMLAgent'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'xmlagent.svg'
|
||||
this.description = `Agent that is designed for LLMs that are good for reasoning/writing XML (e.g: Anthropic Claude)`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
warning: 'Prompt must include input variables: {tools}, {chat_history}, {input} and {agent_scratchpad}',
|
||||
rows: 4,
|
||||
default: defaultSystemMessage,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(): Promise<any> {
|
||||
return null
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the OpenAI Function Agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
let sourceDocuments: ICommonObject[] = []
|
||||
let usedTools: IUsedTool[] = []
|
||||
|
||||
if (options.socketIO && options.socketIOClientId) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: res?.output,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
let finalRes = res?.output
|
||||
|
||||
if (sourceDocuments.length || usedTools.length) {
|
||||
finalRes = { text: res?.output }
|
||||
if (sourceDocuments.length) {
|
||||
finalRes.sourceDocuments = flatten(sourceDocuments)
|
||||
}
|
||||
if (usedTools.length) {
|
||||
finalRes.usedTools = usedTools
|
||||
}
|
||||
return finalRes
|
||||
}
|
||||
|
||||
return finalRes
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = async (
|
||||
nodeData: INodeData,
|
||||
options: ICommonObject,
|
||||
flowObj: { sessionId?: string; chatId?: string; input?: string }
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
let promptMessage = systemMessage ? systemMessage : defaultSystemMessage
|
||||
if (memory.memoryKey) promptMessage = promptMessage.replaceAll('{chat_history}', `{${memory.memoryKey}}`)
|
||||
if (memory.inputKey) promptMessage = promptMessage.replaceAll('{input}', `{${memory.inputKey}}`)
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
HumanMessagePromptTemplate.fromTemplate(promptMessage),
|
||||
new MessagesPlaceholder('agent_scratchpad')
|
||||
])
|
||||
|
||||
const missingVariables = ['tools', 'agent_scratchpad'].filter((v) => !prompt.inputVariables.includes(v))
|
||||
|
||||
if (missingVariables.length > 0) {
|
||||
throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`)
|
||||
}
|
||||
|
||||
const llmWithStop = model.bind({ stop: ['</tool_input>', '</final_answer>'] })
|
||||
|
||||
const messages = (await memory.getChatMessages(flowObj.sessionId, false, prependMessages)) as IMessage[]
|
||||
let chatHistoryMsgTxt = ''
|
||||
for (const message of messages) {
|
||||
if (message.type === 'apiMessage') {
|
||||
chatHistoryMsgTxt += `\\nAI:${message.message}`
|
||||
} else if (message.type === 'userMessage') {
|
||||
chatHistoryMsgTxt += `\\nHuman:${message.message}`
|
||||
}
|
||||
}
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; tools: Tool[]; steps: AgentStep[] }) => i.input,
|
||||
agent_scratchpad: (i: { input: string; tools: Tool[]; steps: AgentStep[] }) => formatLogToMessage(i.steps),
|
||||
tools: (_: { input: string; tools: Tool[]; steps: AgentStep[] }) =>
|
||||
tools.map((tool: Tool) => `${tool.name}: ${tool.description}`),
|
||||
[memoryKey]: (_: { input: string; tools: Tool[]; steps: AgentStep[] }) => chatHistoryMsgTxt
|
||||
},
|
||||
prompt,
|
||||
llmWithStop,
|
||||
new XMLAgentOutputParser()
|
||||
])
|
||||
|
||||
const executor = AgentExecutor.fromAgentAndTools({
|
||||
agent: runnableAgent,
|
||||
tools,
|
||||
sessionId: flowObj?.sessionId,
|
||||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
isXML: true,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false,
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
return executor
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: XMLAgent_Agents }
|
||||
|
|
@ -0,0 +1 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-file-type-xml" width="24" height="24" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M14 3v4a1 1 0 0 0 1 1h4" /><path d="M5 12v-7a2 2 0 0 1 2 -2h7l5 5v4" /><path d="M4 15l4 6" /><path d="M4 21l4 -6" /><path d="M19 15v6h3" /><path d="M11 21v-6l2.5 3l2.5 -3v6" /></svg>
|
||||
|
After Width: | Height: | Size: 476 B |
|
|
@ -0,0 +1,3 @@
|
|||
<svg width="38" height="52" viewBox="0 0 38 52" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M0 12.383V41.035C0 41.392 0.190002 41.723 0.500002 41.901L17.095 51.481C17.25 51.571 17.422 51.616 17.595 51.616C17.768 51.616 17.94 51.571 18.095 51.481L37.279 40.409C37.589 40.23 37.779 39.9 37.779 39.543V10.887C37.779 10.53 37.589 10.199 37.279 10.021L31.168 6.49498C31.014 6.40598 30.841 6.36098 30.669 6.36098C30.496 6.36098 30.323 6.40498 30.169 6.49498L27.295 8.15398V4.83698C27.295 4.47998 27.105 4.14898 26.795 3.97098L20.684 0.441982C20.529 0.352982 20.357 0.307983 20.184 0.307983C20.011 0.307983 19.839 0.352982 19.684 0.441982L13.781 3.85098C13.471 4.02998 13.281 4.35998 13.281 4.71698V12.157L12.921 12.365V11.872C12.921 11.515 12.731 11.185 12.421 11.006L7.405 8.10698C7.25 8.01798 7.077 7.97298 6.905 7.97298C6.733 7.97298 6.56 8.01798 6.405 8.10698L0.501001 11.517C0.191001 11.695 0 12.025 0 12.383ZM1.5 13.248L5.519 15.566V23.294C5.519 23.304 5.524 23.313 5.525 23.323C5.526 23.345 5.529 23.366 5.534 23.388C5.538 23.411 5.544 23.433 5.552 23.455C5.559 23.476 5.567 23.496 5.577 23.516C5.582 23.525 5.581 23.535 5.587 23.544C5.591 23.551 5.6 23.554 5.604 23.561C5.617 23.581 5.63 23.6 5.646 23.618C5.669 23.644 5.695 23.665 5.724 23.686C5.741 23.698 5.751 23.716 5.77 23.727L11.236 26.886C11.243 26.89 11.252 26.888 11.26 26.892C11.328 26.927 11.402 26.952 11.484 26.952C11.566 26.952 11.641 26.928 11.709 26.893C11.728 26.883 11.743 26.87 11.761 26.858C11.812 26.823 11.855 26.781 11.89 26.731C11.898 26.719 11.911 26.715 11.919 26.702C11.924 26.693 11.924 26.682 11.929 26.674C11.944 26.644 11.951 26.613 11.96 26.58C11.969 26.547 11.978 26.515 11.98 26.481C11.98 26.471 11.986 26.462 11.986 26.452V20.138V19.302L17.096 22.251V49.749L1.5 40.747V13.248ZM35.778 10.887L30.879 13.718L25.768 10.766L26.544 10.317L30.668 7.93698L35.778 10.887ZM25.293 4.83598L20.391 7.66498L15.281 4.71598L20.183 1.88398L25.293 4.83598ZM10.92 11.872L6.019 14.701L2.001 12.383L6.904 9.55098L10.92 11.872ZM20.956 16.51L24.268 14.601V18.788C24.268 18.809 24.278 18.827 24.28 18.848C24.284 18.883 
24.29 18.917 24.301 18.95C24.311 18.98 24.325 19.007 24.342 19.034C24.358 19.061 24.373 19.088 24.395 19.112C24.417 19.138 24.444 19.159 24.471 19.18C24.489 19.193 24.499 19.21 24.518 19.221L29.878 22.314L23.998 25.708V18.557C23.998 18.547 23.993 18.538 23.992 18.528C23.991 18.506 23.988 18.485 23.984 18.463C23.979 18.44 23.973 18.418 23.965 18.396C23.958 18.375 23.95 18.355 23.941 18.336C23.936 18.327 23.937 18.316 23.931 18.308C23.925 18.299 23.917 18.294 23.911 18.286C23.898 18.267 23.886 18.251 23.871 18.234C23.855 18.216 23.84 18.2 23.822 18.185C23.805 18.17 23.788 18.157 23.769 18.144C23.76 18.138 23.756 18.129 23.747 18.124L20.956 16.51ZM25.268 11.633L30.379 14.585V21.448L25.268 18.499V13.736V11.633ZM12.486 18.437L17.389 15.604L22.498 18.556L17.595 21.385L12.486 18.437ZM10.985 25.587L7.019 23.295L10.985 21.005V25.587ZM12.42 14.385L14.28 13.311L16.822 14.777L12.42 17.32V14.385ZM14.78 5.58198L19.891 8.53098V15.394L14.78 12.445V5.58198Z" fill="#213B41"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 3.0 KiB |
|
|
@ -0,0 +1,33 @@
|
|||
import { INode, INodeParams } from '../../../src/Interface'
|
||||
|
||||
class LangWatch_Analytic implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs?: INodeParams[]
|
||||
credential: INodeParams
|
||||
|
||||
constructor() {
|
||||
this.label = 'LangWatch'
|
||||
this.name = 'LangWatch'
|
||||
this.version = 1.0
|
||||
this.type = 'LangWatch'
|
||||
this.icon = 'LangWatch.svg'
|
||||
this.category = 'Analytic'
|
||||
this.baseClasses = [this.type]
|
||||
this.inputs = []
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['langwatchApi']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: LangWatch_Analytic }
|
||||
|
Before Width: | Height: | Size: 1.0 KiB After Width: | Height: | Size: 1.0 KiB |
|
|
@ -1,6 +1,6 @@
|
|||
import { INode, INodeParams } from '../../../src/Interface'
|
||||
|
||||
class LLMonitor_Analytic implements INode {
|
||||
class Lunary_Analytic implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
|
|
@ -13,10 +13,10 @@ class LLMonitor_Analytic implements INode {
|
|||
credential: INodeParams
|
||||
|
||||
constructor() {
|
||||
this.label = 'LLMonitor'
|
||||
this.name = 'llmonitor'
|
||||
this.label = 'Lunary'
|
||||
this.name = 'lunary'
|
||||
this.version = 1.0
|
||||
this.type = 'LLMonitor'
|
||||
this.type = 'Lunary'
|
||||
this.icon = 'Lunary.svg'
|
||||
this.category = 'Analytic'
|
||||
this.baseClasses = [this.type]
|
||||
|
|
@ -25,9 +25,9 @@ class LLMonitor_Analytic implements INode {
|
|||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['llmonitorApi']
|
||||
credentialNames: ['lunaryApi']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: LLMonitor_Analytic }
|
||||
module.exports = { nodeClass: Lunary_Analytic }
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
import { getBaseClasses, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
import { BaseCache } from 'langchain/schema'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import hash from 'object-hash'
|
||||
import { getBaseClasses, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
|
||||
class InMemoryCache implements INode {
|
||||
label: string
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
import { getBaseClasses, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
import { Embeddings } from '@langchain/core/embeddings'
|
||||
import { BaseStore } from '@langchain/core/stores'
|
||||
import { CacheBackedEmbeddings } from 'langchain/embeddings/cache_backed'
|
||||
import { Embeddings } from 'langchain/embeddings/base'
|
||||
import { BaseStore } from 'langchain/schema/storage'
|
||||
import { getBaseClasses, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
|
||||
class InMemoryEmbeddingCache implements INode {
|
||||
label: string
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
import { MomentoCache as LangchainMomentoCache } from 'langchain/cache/momento'
|
||||
import { CacheClient, Configurations, CredentialProvider } from '@gomomento/sdk'
|
||||
import { MomentoCache as LangchainMomentoCache } from '@langchain/community/caches/momento'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
|
||||
class MomentoCache implements INode {
|
||||
label: string
|
||||
|
|
|
|||
|
|
@ -1,8 +1,46 @@
|
|||
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
import { RedisCache as LangchainRedisCache } from 'langchain/cache/ioredis'
|
||||
import { Redis } from 'ioredis'
|
||||
import { Generation, ChatGeneration, StoredGeneration, mapStoredMessageToChatMessage } from 'langchain/schema'
|
||||
import { Redis, RedisOptions } from 'ioredis'
|
||||
import { isEqual } from 'lodash'
|
||||
import hash from 'object-hash'
|
||||
import { RedisCache as LangchainRedisCache } from '@langchain/community/caches/ioredis'
|
||||
import { StoredGeneration, mapStoredMessageToChatMessage } from '@langchain/core/messages'
|
||||
import { Generation, ChatGeneration } from '@langchain/core/outputs'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
|
||||
let redisClientSingleton: Redis
|
||||
let redisClientOption: RedisOptions
|
||||
let redisClientUrl: string
|
||||
|
||||
const getRedisClientbyOption = (option: RedisOptions) => {
|
||||
if (!redisClientSingleton) {
|
||||
// if client doesn't exists
|
||||
redisClientSingleton = new Redis(option)
|
||||
redisClientOption = option
|
||||
return redisClientSingleton
|
||||
} else if (redisClientSingleton && !isEqual(option, redisClientOption)) {
|
||||
// if client exists but option changed
|
||||
redisClientSingleton.quit()
|
||||
redisClientSingleton = new Redis(option)
|
||||
redisClientOption = option
|
||||
return redisClientSingleton
|
||||
}
|
||||
return redisClientSingleton
|
||||
}
|
||||
|
||||
const getRedisClientbyUrl = (url: string) => {
|
||||
if (!redisClientSingleton) {
|
||||
// if client doesn't exists
|
||||
redisClientSingleton = new Redis(url)
|
||||
redisClientUrl = url
|
||||
return redisClientSingleton
|
||||
} else if (redisClientSingleton && url !== redisClientUrl) {
|
||||
// if client exists but option changed
|
||||
redisClientSingleton.quit()
|
||||
redisClientSingleton = new Redis(url)
|
||||
redisClientUrl = url
|
||||
return redisClientSingleton
|
||||
}
|
||||
return redisClientSingleton
|
||||
}
|
||||
|
||||
class RedisCache implements INode {
|
||||
label: string
|
||||
|
|
@ -60,7 +98,7 @@ class RedisCache implements INode {
|
|||
|
||||
const tlsOptions = sslEnabled === true ? { tls: { rejectUnauthorized: false } } : {}
|
||||
|
||||
client = new Redis({
|
||||
client = getRedisClientbyOption({
|
||||
port: portStr ? parseInt(portStr) : 6379,
|
||||
host,
|
||||
username,
|
||||
|
|
@ -68,7 +106,7 @@ class RedisCache implements INode {
|
|||
...tlsOptions
|
||||
})
|
||||
} else {
|
||||
client = new Redis(redisUrl)
|
||||
client = getRedisClientbyUrl(redisUrl)
|
||||
}
|
||||
|
||||
const redisClient = new LangchainRedisCache(client)
|
||||
|
|
@ -94,7 +132,7 @@ class RedisCache implements INode {
|
|||
for (let i = 0; i < value.length; i += 1) {
|
||||
const key = getCacheKey(prompt, llmKey, String(i))
|
||||
if (ttl) {
|
||||
await client.set(key, JSON.stringify(serializeGeneration(value[i])), 'EX', parseInt(ttl, 10))
|
||||
await client.set(key, JSON.stringify(serializeGeneration(value[i])), 'PX', parseInt(ttl, 10))
|
||||
} else {
|
||||
await client.set(key, JSON.stringify(serializeGeneration(value[i])))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,8 +1,45 @@
|
|||
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
import { Redis } from 'ioredis'
|
||||
import { Redis, RedisOptions } from 'ioredis'
|
||||
import { isEqual } from 'lodash'
|
||||
import { RedisByteStore } from '@langchain/community/storage/ioredis'
|
||||
import { Embeddings } from '@langchain/core/embeddings'
|
||||
import { CacheBackedEmbeddings } from 'langchain/embeddings/cache_backed'
|
||||
import { RedisByteStore } from 'langchain/storage/ioredis'
|
||||
import { Embeddings } from 'langchain/embeddings/base'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
|
||||
let redisClientSingleton: Redis
|
||||
let redisClientOption: RedisOptions
|
||||
let redisClientUrl: string
|
||||
|
||||
const getRedisClientbyOption = (option: RedisOptions) => {
|
||||
if (!redisClientSingleton) {
|
||||
// if client doesn't exists
|
||||
redisClientSingleton = new Redis(option)
|
||||
redisClientOption = option
|
||||
return redisClientSingleton
|
||||
} else if (redisClientSingleton && !isEqual(option, redisClientOption)) {
|
||||
// if client exists but option changed
|
||||
redisClientSingleton.quit()
|
||||
redisClientSingleton = new Redis(option)
|
||||
redisClientOption = option
|
||||
return redisClientSingleton
|
||||
}
|
||||
return redisClientSingleton
|
||||
}
|
||||
|
||||
const getRedisClientbyUrl = (url: string) => {
|
||||
if (!redisClientSingleton) {
|
||||
// if client doesn't exists
|
||||
redisClientSingleton = new Redis(url)
|
||||
redisClientUrl = url
|
||||
return redisClientSingleton
|
||||
} else if (redisClientSingleton && url !== redisClientUrl) {
|
||||
// if client exists but option changed
|
||||
redisClientSingleton.quit()
|
||||
redisClientSingleton = new Redis(url)
|
||||
redisClientUrl = url
|
||||
return redisClientSingleton
|
||||
}
|
||||
return redisClientSingleton
|
||||
}
|
||||
|
||||
class RedisEmbeddingsCache implements INode {
|
||||
label: string
|
||||
|
|
@ -75,7 +112,7 @@ class RedisEmbeddingsCache implements INode {
|
|||
|
||||
const tlsOptions = sslEnabled === true ? { tls: { rejectUnauthorized: false } } : {}
|
||||
|
||||
client = new Redis({
|
||||
client = getRedisClientbyOption({
|
||||
port: portStr ? parseInt(portStr) : 6379,
|
||||
host,
|
||||
username,
|
||||
|
|
@ -83,7 +120,7 @@ class RedisEmbeddingsCache implements INode {
|
|||
...tlsOptions
|
||||
})
|
||||
} else {
|
||||
client = new Redis(redisUrl)
|
||||
client = getRedisClientbyUrl(redisUrl)
|
||||
}
|
||||
|
||||
ttl ??= '3600'
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
import { UpstashRedisCache as LangchainUpstashRedisCache } from '@langchain/community/caches/upstash_redis'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
import { UpstashRedisCache as LangchainUpstashRedisCache } from 'langchain/cache/upstash_redis'
|
||||
|
||||
class UpstashRedisCache implements INode {
|
||||
label: string
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { PromptTemplate } from '@langchain/core/prompts'
|
||||
import { APIChain } from 'langchain/chains'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { PromptTemplate } from 'langchain/prompts'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
|
||||
export const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:
|
||||
|
|
|
|||
|
|
@ -1,8 +1,11 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ChatOpenAI } from '@langchain/openai'
|
||||
import { APIChain, createOpenAPIChain } from 'langchain/chains'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { ChatOpenAI } from 'langchain/chat_models/openai'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { getFileFromStorage } from '../../../src'
|
||||
|
||||
class OpenApiChain_Chains implements INode {
|
||||
label: string
|
||||
|
|
@ -18,7 +21,7 @@ class OpenApiChain_Chains implements INode {
|
|||
constructor() {
|
||||
this.label = 'OpenAPI Chain'
|
||||
this.name = 'openApiChain'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'OpenAPIChain'
|
||||
this.icon = 'openapi.svg'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -50,19 +53,37 @@ class OpenApiChain_Chains implements INode {
|
|||
type: 'json',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData): Promise<any> {
|
||||
return await initChain(nodeData)
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
return await initChain(nodeData, options)
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
const chain = await initChain(nodeData)
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const chain = await initChain(nodeData, options)
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the OpenAPI chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
if (options.socketIO && options.socketIOClientId) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
const res = await chain.run(input, [loggerHandler, handler, ...callbacks])
|
||||
|
|
@ -74,7 +95,7 @@ class OpenApiChain_Chains implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const initChain = async (nodeData: INodeData) => {
|
||||
const initChain = async (nodeData: INodeData, options: ICommonObject) => {
|
||||
const model = nodeData.inputs?.model as ChatOpenAI
|
||||
const headers = nodeData.inputs?.headers as string
|
||||
const yamlLink = nodeData.inputs?.yamlLink as string
|
||||
|
|
@ -85,10 +106,17 @@ const initChain = async (nodeData: INodeData) => {
|
|||
if (yamlLink) {
|
||||
yamlString = yamlLink
|
||||
} else {
|
||||
const splitDataURI = yamlFileBase64.split(',')
|
||||
splitDataURI.pop()
|
||||
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
|
||||
yamlString = bf.toString('utf-8')
|
||||
if (yamlFileBase64.startsWith('FILE-STORAGE::')) {
|
||||
const file = yamlFileBase64.replace('FILE-STORAGE::', '')
|
||||
const chatflowid = options.chatflowid
|
||||
const fileData = await getFileFromStorage(file, chatflowid)
|
||||
yamlString = fileData.toString()
|
||||
} else {
|
||||
const splitDataURI = yamlFileBase64.split(',')
|
||||
splitDataURI.pop()
|
||||
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
|
||||
yamlString = bf.toString('utf-8')
|
||||
}
|
||||
}
|
||||
|
||||
return await createOpenAPIChain(yamlString, {
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { PromptTemplate } from 'langchain/prompts'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { PromptTemplate } from '@langchain/core/prompts'
|
||||
import { API_RESPONSE_RAW_PROMPT_TEMPLATE, API_URL_RAW_PROMPT_TEMPLATE, APIChain } from './postCore'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
|
||||
class POSTApiChain_Chains implements INode {
|
||||
label: string
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { CallbackManagerForChainRun } from 'langchain/callbacks'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { CallbackManagerForChainRun } from '@langchain/core/callbacks/manager'
|
||||
import { BaseChain, ChainInputs, LLMChain, SerializedAPIChain } from 'langchain/chains'
|
||||
import { BasePromptTemplate, PromptTemplate } from 'langchain/prompts'
|
||||
import { ChainValues } from 'langchain/schema'
|
||||
import { BasePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
|
||||
import { ChainValues } from '@langchain/core/utils/types'
|
||||
import fetch from 'node-fetch'
|
||||
|
||||
export const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:
|
||||
|
|
|
|||
|
|
@ -1,12 +1,32 @@
|
|||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ConversationChain } from 'langchain/chains'
|
||||
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
|
||||
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
|
||||
import { BaseChatModel } from 'langchain/chat_models/base'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { RunnableSequence } from 'langchain/schema/runnable'
|
||||
import { StringOutputParser } from 'langchain/schema/output_parser'
|
||||
import {
|
||||
ChatPromptTemplate,
|
||||
HumanMessagePromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
SystemMessagePromptTemplate,
|
||||
BaseMessagePromptTemplateLike,
|
||||
PromptTemplate
|
||||
} from '@langchain/core/prompts'
|
||||
import { RunnableSequence } from '@langchain/core/runnables'
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { HumanMessage } from '@langchain/core/messages'
|
||||
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
|
||||
import {
|
||||
IVisionChatModal,
|
||||
FlowiseMemory,
|
||||
ICommonObject,
|
||||
INode,
|
||||
INodeData,
|
||||
INodeParams,
|
||||
MessageContentImageUrl
|
||||
} from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
|
||||
|
||||
let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
|
||||
const inputKey = 'input'
|
||||
|
|
@ -26,7 +46,7 @@ class ConversationChain_Chains implements INode {
|
|||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Conversation Chain'
|
||||
this.name = 'conversationChain'
|
||||
this.version = 2.0
|
||||
this.version = 3.0
|
||||
this.type = 'ConversationChain'
|
||||
this.icon = 'conv.svg'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -60,6 +80,14 @@ class ConversationChain_Chains implements INode {
|
|||
optional: true,
|
||||
list: true
|
||||
},*/
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessagePrompt',
|
||||
|
|
@ -76,13 +104,26 @@ class ConversationChain_Chains implements INode {
|
|||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
|
||||
const chain = prepareChain(nodeData, options, this.sessionId)
|
||||
return chain
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const memory = nodeData.inputs?.memory
|
||||
const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
|
||||
|
||||
const chain = await prepareChain(nodeData, options, this.sessionId)
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the LLM chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const additionalCallback = await additionalCallbacks(nodeData, options)
|
||||
|
|
@ -120,20 +161,34 @@ class ConversationChain_Chains implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const prepareChatPrompt = (nodeData: INodeData) => {
|
||||
const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageContentImageUrl[]) => {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const prompt = nodeData.inputs?.systemMessagePrompt as string
|
||||
const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate
|
||||
let model = nodeData.inputs?.model as BaseChatModel
|
||||
|
||||
if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) {
|
||||
const sysPrompt = chatPromptTemplate.promptMessages[0]
|
||||
const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
|
||||
const chatPrompt = ChatPromptTemplate.fromMessages([
|
||||
sysPrompt,
|
||||
new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
|
||||
humanPrompt
|
||||
])
|
||||
const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt]
|
||||
|
||||
// OpenAI works better when separate images into standalone human messages
|
||||
if (model instanceof ChatOpenAI && humanImageMessages.length) {
|
||||
messages.push(new HumanMessage({ content: [...humanImageMessages] }))
|
||||
} else if (humanImageMessages.length) {
|
||||
const lastMessage = messages.pop() as HumanMessagePromptTemplate
|
||||
const template = (lastMessage.prompt as PromptTemplate).template as string
|
||||
const msg = HumanMessagePromptTemplate.fromTemplate([
|
||||
...humanImageMessages,
|
||||
{
|
||||
text: template
|
||||
}
|
||||
])
|
||||
msg.inputVariables = lastMessage.inputVariables
|
||||
messages.push(msg)
|
||||
}
|
||||
|
||||
const chatPrompt = ChatPromptTemplate.fromMessages(messages)
|
||||
if ((chatPromptTemplate as any).promptValues) {
|
||||
// @ts-ignore
|
||||
chatPrompt.promptValues = (chatPromptTemplate as any).promptValues
|
||||
|
|
@ -142,21 +197,44 @@ const prepareChatPrompt = (nodeData: INodeData) => {
|
|||
return chatPrompt
|
||||
}
|
||||
|
||||
const chatPrompt = ChatPromptTemplate.fromMessages([
|
||||
const messages: BaseMessagePromptTemplateLike[] = [
|
||||
SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
|
||||
new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
|
||||
HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
|
||||
])
|
||||
]
|
||||
|
||||
// OpenAI works better when separate images into standalone human messages
|
||||
if (model instanceof ChatOpenAI && humanImageMessages.length) {
|
||||
messages.push(new HumanMessage({ content: [...humanImageMessages] }))
|
||||
} else if (humanImageMessages.length) {
|
||||
messages.pop()
|
||||
messages.push(HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages]))
|
||||
}
|
||||
|
||||
const chatPrompt = ChatPromptTemplate.fromMessages(messages)
|
||||
|
||||
return chatPrompt
|
||||
}
|
||||
|
||||
const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMessage[] = []) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const prepareChain = async (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
|
||||
let model = nodeData.inputs?.model as BaseChatModel
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const memoryKey = memory.memoryKey ?? 'chat_history'
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
const chatPrompt = prepareChatPrompt(nodeData)
|
||||
let messageContent: MessageContentImageUrl[] = []
|
||||
if (llmSupportsVision(model)) {
|
||||
messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
const visionChatModel = model as IVisionChatModal
|
||||
if (messageContent?.length) {
|
||||
visionChatModel.setVisionModel()
|
||||
} else {
|
||||
// revert to previous values if image upload is empty
|
||||
visionChatModel.revertToOriginalModel()
|
||||
}
|
||||
}
|
||||
|
||||
const chatPrompt = prepareChatPrompt(nodeData, messageContent)
|
||||
let promptVariables = {}
|
||||
const promptValuesRaw = (chatPrompt as any).promptValues
|
||||
if (promptValuesRaw) {
|
||||
|
|
@ -175,12 +253,12 @@ const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMes
|
|||
{
|
||||
[inputKey]: (input: { input: string }) => input.input,
|
||||
[memoryKey]: async () => {
|
||||
const history = await memory.getChatMessages(sessionId, true, chatHistory)
|
||||
const history = await memory.getChatMessages(sessionId, true, prependMessages)
|
||||
return history
|
||||
},
|
||||
...promptVariables
|
||||
},
|
||||
chatPrompt,
|
||||
prepareChatPrompt(nodeData, messageContent),
|
||||
model,
|
||||
new StringOutputParser()
|
||||
])
|
||||
|
|
|
|||
|
|
@ -1,19 +1,30 @@
|
|||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { ConversationalRetrievalQAChain } from 'langchain/chains'
|
||||
import { BaseRetriever } from 'langchain/schema/retriever'
|
||||
import { BufferMemoryInput } from 'langchain/memory'
|
||||
import { PromptTemplate } from 'langchain/prompts'
|
||||
import { QA_TEMPLATE, REPHRASE_TEMPLATE, RESPONSE_TEMPLATE } from './prompts'
|
||||
import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from 'langchain/schema/runnable'
|
||||
import { BaseMessage, HumanMessage, AIMessage } from 'langchain/schema'
|
||||
import { StringOutputParser } from 'langchain/schema/output_parser'
|
||||
import type { Document } from 'langchain/document'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
|
||||
import { applyPatch } from 'fast-json-patch'
|
||||
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
|
||||
import { ConsoleCallbackHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { BaseRetriever } from '@langchain/core/retrievers'
|
||||
import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
|
||||
import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from '@langchain/core/runnables'
|
||||
import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages'
|
||||
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers'
|
||||
import type { Document } from '@langchain/core/documents'
|
||||
import { BufferMemoryInput } from 'langchain/memory'
|
||||
import { ConversationalRetrievalQAChain } from 'langchain/chains'
|
||||
import { getBaseClasses, mapChatMessageToBaseMessage } from '../../../src/utils'
|
||||
import { ConsoleCallbackHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import {
|
||||
FlowiseMemory,
|
||||
ICommonObject,
|
||||
IMessage,
|
||||
INode,
|
||||
INodeData,
|
||||
INodeParams,
|
||||
IDatabaseEntity,
|
||||
MemoryMethods
|
||||
} from '../../../src/Interface'
|
||||
import { QA_TEMPLATE, REPHRASE_TEMPLATE, RESPONSE_TEMPLATE } from './prompts'
|
||||
|
||||
type RetrievalChainInput = {
|
||||
chat_history: string
|
||||
|
|
@ -37,7 +48,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
|
|||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Conversational Retrieval QA Chain'
|
||||
this.name = 'conversationalRetrievalQAChain'
|
||||
this.version = 2.0
|
||||
this.version = 3.0
|
||||
this.type = 'ConversationalRetrievalQAChain'
|
||||
this.icon = 'qa.svg'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -88,6 +99,14 @@ class ConversationalRetrievalQAChain_Chains implements INode {
|
|||
additionalParams: true,
|
||||
optional: true,
|
||||
default: RESPONSE_TEMPLATE
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
/** Deprecated
|
||||
{
|
||||
|
|
@ -156,6 +175,11 @@ class ConversationalRetrievalQAChain_Chains implements INode {
|
|||
const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
|
||||
const responsePrompt = nodeData.inputs?.responsePrompt as string
|
||||
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
|
||||
const prependMessages = options?.prependMessages
|
||||
|
||||
const appDataSource = options.appDataSource as DataSource
|
||||
const databaseEntities = options.databaseEntities as IDatabaseEntity
|
||||
const chatflowid = options.chatflowid as string
|
||||
|
||||
let customResponsePrompt = responsePrompt
|
||||
// If the deprecated systemMessagePrompt is still exists
|
||||
|
|
@ -164,17 +188,30 @@ class ConversationalRetrievalQAChain_Chains implements INode {
|
|||
}
|
||||
|
||||
let memory: FlowiseMemory | undefined = externalMemory
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
if (!memory) {
|
||||
memory = new BufferMemory({
|
||||
returnMessages: true,
|
||||
memoryKey: 'chat_history',
|
||||
inputKey: 'input'
|
||||
appDataSource,
|
||||
databaseEntities,
|
||||
chatflowid
|
||||
})
|
||||
}
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the Conversational Retrieval QA Chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
|
||||
|
||||
const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []
|
||||
const history = ((await memory.getChatMessages(this.sessionId, false, prependMessages)) as IMessage[]) ?? []
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const additionalCallback = await additionalCallbacks(nodeData, options)
|
||||
|
|
@ -347,31 +384,67 @@ const createChain = (
|
|||
return conversationalQAChain
|
||||
}
|
||||
|
||||
interface BufferMemoryExtendedInput {
|
||||
appDataSource: DataSource
|
||||
databaseEntities: IDatabaseEntity
|
||||
chatflowid: string
|
||||
}
|
||||
|
||||
class BufferMemory extends FlowiseMemory implements MemoryMethods {
|
||||
constructor(fields: BufferMemoryInput) {
|
||||
appDataSource: DataSource
|
||||
databaseEntities: IDatabaseEntity
|
||||
chatflowid: string
|
||||
|
||||
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
|
||||
super(fields)
|
||||
this.appDataSource = fields.appDataSource
|
||||
this.databaseEntities = fields.databaseEntities
|
||||
this.chatflowid = fields.chatflowid
|
||||
}
|
||||
|
||||
async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise<IMessage[] | BaseMessage[]> {
|
||||
await this.chatHistory.clear()
|
||||
async getChatMessages(
|
||||
overrideSessionId = '',
|
||||
returnBaseMessages = false,
|
||||
prependMessages?: IMessage[]
|
||||
): Promise<IMessage[] | BaseMessage[]> {
|
||||
if (!overrideSessionId) return []
|
||||
|
||||
for (const msg of prevHistory) {
|
||||
if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
|
||||
else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
|
||||
const chatMessage = await this.appDataSource.getRepository(this.databaseEntities['ChatMessage']).find({
|
||||
where: {
|
||||
sessionId: overrideSessionId,
|
||||
chatflowid: this.chatflowid
|
||||
},
|
||||
order: {
|
||||
createdDate: 'ASC'
|
||||
}
|
||||
})
|
||||
|
||||
if (prependMessages?.length) {
|
||||
chatMessage.unshift(...prependMessages)
|
||||
}
|
||||
|
||||
const memoryResult = await this.loadMemoryVariables({})
|
||||
const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
|
||||
return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
|
||||
if (returnBaseMessages) {
|
||||
return mapChatMessageToBaseMessage(chatMessage)
|
||||
}
|
||||
|
||||
let returnIMessages: IMessage[] = []
|
||||
for (const m of chatMessage) {
|
||||
returnIMessages.push({
|
||||
message: m.content as string,
|
||||
type: m.role
|
||||
})
|
||||
}
|
||||
return returnIMessages
|
||||
}
|
||||
|
||||
async addChatMessages(): Promise<void> {
|
||||
// adding chat messages will be done on the fly in getChatMessages()
|
||||
// adding chat messages is done on server level
|
||||
return
|
||||
}
|
||||
|
||||
async clearChatMessages(): Promise<void> {
|
||||
await this.clear()
|
||||
// clearing chat messages is done on server level
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,13 +1,15 @@
|
|||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { BaseOutputParser } from 'langchain/schema/output_parser'
|
||||
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
|
||||
import { BaseLLMOutputParser } from 'langchain/schema/output_parser'
|
||||
import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
|
||||
import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
|
||||
import { HumanMessage } from '@langchain/core/messages'
|
||||
import { ChatPromptTemplate, FewShotPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
|
||||
import { OutputFixingParser } from 'langchain/output_parsers'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { IVisionChatModal, ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler'
|
||||
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
|
||||
class LLMChain_Chains implements INode {
|
||||
label: string
|
||||
|
|
@ -108,7 +110,9 @@ class LLMChain_Chains implements INode {
|
|||
})
|
||||
const inputVariables = chain.prompt.inputVariables as string[] // ["product"]
|
||||
promptValues = injectOutputParser(this.outputParser, chain, promptValues)
|
||||
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
|
||||
// Disable streaming because its not final chain
|
||||
const disableStreaming = true
|
||||
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData, disableStreaming)
|
||||
// eslint-disable-next-line no-console
|
||||
console.log('\x1b[92m\x1b[1m\n*****OUTPUT PREDICTION*****\n\x1b[0m\x1b[0m')
|
||||
// eslint-disable-next-line no-console
|
||||
|
|
@ -152,21 +156,16 @@ const runPrediction = async (
|
|||
input: string,
|
||||
promptValuesRaw: ICommonObject | undefined,
|
||||
options: ICommonObject,
|
||||
nodeData: INodeData
|
||||
nodeData: INodeData,
|
||||
disableStreaming?: boolean
|
||||
) => {
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
const isStreaming = options.socketIO && options.socketIOClientId
|
||||
const isStreaming = !disableStreaming && options.socketIO && options.socketIOClientId
|
||||
const socketIO = isStreaming ? options.socketIO : undefined
|
||||
const socketIOClientId = isStreaming ? options.socketIOClientId : ''
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
/**
|
||||
* Apply string transformation to reverse converted special chars:
|
||||
* FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" }
|
||||
* TO: { "value": "hello i am ben\n\n\thow are you?" }
|
||||
*/
|
||||
const promptValues = handleEscapeCharacters(promptValuesRaw, true)
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
|
|
@ -179,6 +178,60 @@ const runPrediction = async (
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply string transformation to reverse converted special chars:
|
||||
* FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" }
|
||||
* TO: { "value": "hello i am ben\n\n\thow are you?" }
|
||||
*/
|
||||
const promptValues = handleEscapeCharacters(promptValuesRaw, true)
|
||||
|
||||
if (llmSupportsVision(chain.llm)) {
|
||||
const visionChatModel = chain.llm as IVisionChatModal
|
||||
const messageContent = await addImagesToMessages(nodeData, options, visionChatModel.multiModalOption)
|
||||
if (messageContent?.length) {
|
||||
// Change model to gpt-4-vision && max token to higher when using gpt-4-vision
|
||||
visionChatModel.setVisionModel()
|
||||
// Add image to the message
|
||||
if (chain.prompt instanceof PromptTemplate) {
|
||||
const existingPromptTemplate = chain.prompt.template as string
|
||||
const msg = HumanMessagePromptTemplate.fromTemplate([
|
||||
...messageContent,
|
||||
{
|
||||
text: existingPromptTemplate
|
||||
}
|
||||
])
|
||||
msg.inputVariables = chain.prompt.inputVariables
|
||||
chain.prompt = ChatPromptTemplate.fromMessages([msg])
|
||||
} else if (chain.prompt instanceof ChatPromptTemplate) {
|
||||
if (chain.prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
|
||||
const lastMessage = chain.prompt.promptMessages.pop() as HumanMessagePromptTemplate
|
||||
const template = (lastMessage.prompt as PromptTemplate).template as string
|
||||
const msg = HumanMessagePromptTemplate.fromTemplate([
|
||||
...messageContent,
|
||||
{
|
||||
text: template
|
||||
}
|
||||
])
|
||||
msg.inputVariables = lastMessage.inputVariables
|
||||
chain.prompt.promptMessages.push(msg)
|
||||
} else {
|
||||
chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
|
||||
}
|
||||
} else if (chain.prompt instanceof FewShotPromptTemplate) {
|
||||
let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string
|
||||
let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([
|
||||
HumanMessagePromptTemplate.fromTemplate(existingFewShotPromptTemplate)
|
||||
])
|
||||
newFewShotPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
|
||||
// @ts-ignore
|
||||
chain.prompt.examplePrompt = newFewShotPromptTemplate
|
||||
}
|
||||
} else {
|
||||
// revert to previous values if image upload is empty
|
||||
visionChatModel.revertToOriginalModel()
|
||||
}
|
||||
}
|
||||
|
||||
if (promptValues && inputVariables.length > 0) {
|
||||
let seen: string[] = []
|
||||
|
||||
|
|
|
|||
|
|
@ -1,8 +1,10 @@
|
|||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { MultiPromptChain } from 'langchain/chains'
|
||||
import { ICommonObject, INode, INodeData, INodeParams, PromptRetriever } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { MultiPromptChain } from 'langchain/chains'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class MultiPromptChain_Chains implements INode {
|
||||
label: string
|
||||
|
|
@ -18,7 +20,7 @@ class MultiPromptChain_Chains implements INode {
|
|||
constructor() {
|
||||
this.label = 'Multi Prompt Chain'
|
||||
this.name = 'multiPromptChain'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'MultiPromptChain'
|
||||
this.icon = 'prompt.svg'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -35,6 +37,14 @@ class MultiPromptChain_Chains implements INode {
|
|||
name: 'promptRetriever',
|
||||
type: 'PromptRetriever',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -62,8 +72,19 @@ class MultiPromptChain_Chains implements INode {
|
|||
return chain
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const chain = nodeData.instance as MultiPromptChain
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the Multi Prompt Chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
const obj = { input }
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
|
|
|
|||
|
|
@ -1,8 +1,10 @@
|
|||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { MultiRetrievalQAChain } from 'langchain/chains'
|
||||
import { ICommonObject, INode, INodeData, INodeParams, VectorStoreRetriever } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { MultiRetrievalQAChain } from 'langchain/chains'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class MultiRetrievalQAChain_Chains implements INode {
|
||||
label: string
|
||||
|
|
@ -18,7 +20,7 @@ class MultiRetrievalQAChain_Chains implements INode {
|
|||
constructor() {
|
||||
this.label = 'Multi Retrieval QA Chain'
|
||||
this.name = 'multiRetrievalQAChain'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'MultiRetrievalQAChain'
|
||||
this.icon = 'qa.svg'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -41,6 +43,14 @@ class MultiRetrievalQAChain_Chains implements INode {
|
|||
name: 'returnSourceDocuments',
|
||||
type: 'boolean',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -72,7 +82,17 @@ class MultiRetrievalQAChain_Chains implements INode {
|
|||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
const chain = nodeData.instance as MultiRetrievalQAChain
|
||||
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
|
||||
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the Multi Retrieval QA Chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
const obj = { input }
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
|
|
|||
|
|
@ -1,9 +1,11 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { BaseRetriever } from '@langchain/core/retrievers'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { RetrievalQAChain } from 'langchain/chains'
|
||||
import { BaseRetriever } from 'langchain/schema/retriever'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class RetrievalQAChain_Chains implements INode {
|
||||
label: string
|
||||
|
|
@ -19,7 +21,7 @@ class RetrievalQAChain_Chains implements INode {
|
|||
constructor() {
|
||||
this.label = 'Retrieval QA Chain'
|
||||
this.name = 'retrievalQAChain'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'RetrievalQAChain'
|
||||
this.icon = 'qa.svg'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -35,6 +37,14 @@ class RetrievalQAChain_Chains implements INode {
|
|||
label: 'Vector Store Retriever',
|
||||
name: 'vectorStoreRetriever',
|
||||
type: 'BaseRetriever'
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -47,8 +57,19 @@ class RetrievalQAChain_Chains implements INode {
|
|||
return chain
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const chain = nodeData.instance as RetrievalQAChain
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the Retrieval QA Chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
const obj = {
|
||||
query: input
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,12 +1,14 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { SqlDatabaseChain, SqlDatabaseChainInput, DEFAULT_SQL_DATABASE_PROMPT } from 'langchain/chains/sql_db'
|
||||
import { getBaseClasses, getInputVariables } from '../../../src/utils'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { SqlDatabase } from 'langchain/sql_db'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { PromptTemplate, PromptTemplateInput } from 'langchain/prompts'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { DataSourceOptions } from 'typeorm/data-source'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { PromptTemplate, PromptTemplateInput } from '@langchain/core/prompts'
|
||||
import { SqlDatabaseChain, SqlDatabaseChainInput, DEFAULT_SQL_DATABASE_PROMPT } from 'langchain/chains/sql_db'
|
||||
import { SqlDatabase } from 'langchain/sql_db'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { getBaseClasses, getInputVariables } from '../../../src/utils'
|
||||
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql'
|
||||
|
||||
|
|
@ -24,7 +26,7 @@ class SqlDatabaseChain_Chains implements INode {
|
|||
constructor() {
|
||||
this.label = 'Sql Database Chain'
|
||||
this.name = 'sqlDatabaseChain'
|
||||
this.version = 4.0
|
||||
this.version = 5.0
|
||||
this.type = 'SqlDatabaseChain'
|
||||
this.icon = 'sqlchain.svg'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -70,7 +72,7 @@ class SqlDatabaseChain_Chains implements INode {
|
|||
label: 'Include Tables',
|
||||
name: 'includesTables',
|
||||
type: 'string',
|
||||
description: 'Tables to include for queries, seperated by comma. Can only use Include Tables or Ignore Tables',
|
||||
description: 'Tables to include for queries, separated by comma. Can only use Include Tables or Ignore Tables',
|
||||
placeholder: 'table1, table2',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
|
|
@ -79,7 +81,7 @@ class SqlDatabaseChain_Chains implements INode {
|
|||
label: 'Ignore Tables',
|
||||
name: 'ignoreTables',
|
||||
type: 'string',
|
||||
description: 'Tables to ignore for queries, seperated by comma. Can only use Ignore Tables or Include Tables',
|
||||
description: 'Tables to ignore for queries, separated by comma. Can only use Ignore Tables or Include Tables',
|
||||
placeholder: 'table1, table2',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
|
|
@ -115,6 +117,14 @@ class SqlDatabaseChain_Chains implements INode {
|
|||
placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat,
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -144,7 +154,7 @@ class SqlDatabaseChain_Chains implements INode {
|
|||
return chain
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const databaseType = nodeData.inputs?.database as DatabaseType
|
||||
const model = nodeData.inputs?.model as BaseLanguageModel
|
||||
const url = nodeData.inputs?.url as string
|
||||
|
|
@ -155,6 +165,17 @@ class SqlDatabaseChain_Chains implements INode {
|
|||
const sampleRowsInTableInfo = nodeData.inputs?.sampleRowsInTableInfo as number
|
||||
const topK = nodeData.inputs?.topK as number
|
||||
const customPrompt = nodeData.inputs?.customPrompt as string
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the Sql Database Chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const chain = await getSQLDBChain(
|
||||
databaseType,
|
||||
|
|
|
|||
|
|
@ -1,9 +1,11 @@
|
|||
import fetch from 'node-fetch'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
import { VectaraStore } from '@langchain/community/vectorstores/vectara'
|
||||
import { VectorDBQAChain } from 'langchain/chains'
|
||||
import { INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { VectorDBQAChain } from 'langchain/chains'
|
||||
import { Document } from 'langchain/document'
|
||||
import { VectaraStore } from 'langchain/vectorstores/vectara'
|
||||
import fetch from 'node-fetch'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
// functionality based on https://github.com/vectara/vectara-answer
|
||||
const reorderCitations = (unorderedSummary: string) => {
|
||||
|
|
@ -48,7 +50,7 @@ class VectaraChain_Chains implements INode {
|
|||
constructor() {
|
||||
this.label = 'Vectara QA Chain'
|
||||
this.name = 'vectaraQAChain'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'VectaraQAChain'
|
||||
this.icon = 'vectara.png'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -219,6 +221,14 @@ class VectaraChain_Chains implements INode {
|
|||
description: 'Maximum results used to build the summarized response',
|
||||
type: 'number',
|
||||
default: 7
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -227,7 +237,7 @@ class VectaraChain_Chains implements INode {
|
|||
return null
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string): Promise<object> {
|
||||
async run(nodeData: INodeData, input: string): Promise<string | object> {
|
||||
const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore
|
||||
const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng'
|
||||
const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string
|
||||
|
|
@ -252,6 +262,18 @@ class VectaraChain_Chains implements INode {
|
|||
const mmrRerankerId = 272725718
|
||||
const mmrEnabled = vectaraFilter?.mmrConfig?.enabled
|
||||
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the Vectara chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const data = {
|
||||
query: [
|
||||
{
|
||||
|
|
|
|||
|
|
@ -1,9 +1,11 @@
|
|||
import { BaseLanguageModel } from '@langchain/core/language_models/base'
|
||||
import { VectorStore } from '@langchain/core/vectorstores'
|
||||
import { VectorDBQAChain } from 'langchain/chains'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { VectorDBQAChain } from 'langchain/chains'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { VectorStore } from 'langchain/vectorstores/base'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { checkInputs, Moderation } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
|
||||
class VectorDBQAChain_Chains implements INode {
|
||||
label: string
|
||||
|
|
@ -19,7 +21,7 @@ class VectorDBQAChain_Chains implements INode {
|
|||
constructor() {
|
||||
this.label = 'VectorDB QA Chain'
|
||||
this.name = 'vectorDBQAChain'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'VectorDBQAChain'
|
||||
this.icon = 'vectordb.svg'
|
||||
this.category = 'Chains'
|
||||
|
|
@ -35,6 +37,14 @@ class VectorDBQAChain_Chains implements INode {
|
|||
label: 'Vector Store',
|
||||
name: 'vectorStore',
|
||||
type: 'VectorStore'
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -50,8 +60,20 @@ class VectorDBQAChain_Chains implements INode {
|
|||
return chain
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
|
||||
const chain = nodeData.instance as VectorDBQAChain
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the VectorDB QA Chain
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
const obj = {
|
||||
query: input
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,15 +1,12 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
|
||||
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { BedrockChat } from 'langchain/chat_models/bedrock'
|
||||
import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
|
||||
import { BaseCache } from 'langchain/schema'
|
||||
import { BaseChatModelParams } from 'langchain/chat_models/base'
|
||||
import { BedrockChat } from './FlowiseAWSChatBedrock'
|
||||
import { getModels, getRegions, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
|
||||
/**
|
||||
* I had to run the following to build the component
|
||||
* and get the icon copied over to the dist directory
|
||||
* Flowise/packages/components > yarn build
|
||||
*
|
||||
* @author Michael Connor <mlconnor@yahoo.com>
|
||||
*/
|
||||
class AWSChatBedrock_ChatModels implements INode {
|
||||
|
|
@ -27,7 +24,7 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'AWS ChatBedrock'
|
||||
this.name = 'awsChatBedrock'
|
||||
this.version = 3.0
|
||||
this.version = 5.0
|
||||
this.type = 'AWSChatBedrock'
|
||||
this.icon = 'aws.svg'
|
||||
this.category = 'Chat Models'
|
||||
|
|
@ -50,57 +47,16 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
{
|
||||
label: 'Region',
|
||||
name: 'region',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ label: 'af-south-1', name: 'af-south-1' },
|
||||
{ label: 'ap-east-1', name: 'ap-east-1' },
|
||||
{ label: 'ap-northeast-1', name: 'ap-northeast-1' },
|
||||
{ label: 'ap-northeast-2', name: 'ap-northeast-2' },
|
||||
{ label: 'ap-northeast-3', name: 'ap-northeast-3' },
|
||||
{ label: 'ap-south-1', name: 'ap-south-1' },
|
||||
{ label: 'ap-south-2', name: 'ap-south-2' },
|
||||
{ label: 'ap-southeast-1', name: 'ap-southeast-1' },
|
||||
{ label: 'ap-southeast-2', name: 'ap-southeast-2' },
|
||||
{ label: 'ap-southeast-3', name: 'ap-southeast-3' },
|
||||
{ label: 'ap-southeast-4', name: 'ap-southeast-4' },
|
||||
{ label: 'ap-southeast-5', name: 'ap-southeast-5' },
|
||||
{ label: 'ap-southeast-6', name: 'ap-southeast-6' },
|
||||
{ label: 'ca-central-1', name: 'ca-central-1' },
|
||||
{ label: 'ca-west-1', name: 'ca-west-1' },
|
||||
{ label: 'cn-north-1', name: 'cn-north-1' },
|
||||
{ label: 'cn-northwest-1', name: 'cn-northwest-1' },
|
||||
{ label: 'eu-central-1', name: 'eu-central-1' },
|
||||
{ label: 'eu-central-2', name: 'eu-central-2' },
|
||||
{ label: 'eu-north-1', name: 'eu-north-1' },
|
||||
{ label: 'eu-south-1', name: 'eu-south-1' },
|
||||
{ label: 'eu-south-2', name: 'eu-south-2' },
|
||||
{ label: 'eu-west-1', name: 'eu-west-1' },
|
||||
{ label: 'eu-west-2', name: 'eu-west-2' },
|
||||
{ label: 'eu-west-3', name: 'eu-west-3' },
|
||||
{ label: 'il-central-1', name: 'il-central-1' },
|
||||
{ label: 'me-central-1', name: 'me-central-1' },
|
||||
{ label: 'me-south-1', name: 'me-south-1' },
|
||||
{ label: 'sa-east-1', name: 'sa-east-1' },
|
||||
{ label: 'us-east-1', name: 'us-east-1' },
|
||||
{ label: 'us-east-2', name: 'us-east-2' },
|
||||
{ label: 'us-gov-east-1', name: 'us-gov-east-1' },
|
||||
{ label: 'us-gov-west-1', name: 'us-gov-west-1' },
|
||||
{ label: 'us-west-1', name: 'us-west-1' },
|
||||
{ label: 'us-west-2', name: 'us-west-2' }
|
||||
],
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRegions',
|
||||
default: 'us-east-1'
|
||||
},
|
||||
{
|
||||
label: 'Model Name',
|
||||
name: 'model',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
|
||||
{ label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
|
||||
{ label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' },
|
||||
{ label: 'meta.llama2-13b-chat-v1', name: 'meta.llama2-13b-chat-v1' }
|
||||
],
|
||||
default: 'anthropic.claude-v2'
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listModels',
|
||||
default: 'anthropic.claude-3-haiku'
|
||||
},
|
||||
{
|
||||
label: 'Custom Model Name',
|
||||
|
|
@ -128,10 +84,29 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
optional: true,
|
||||
additionalParams: true,
|
||||
default: 200
|
||||
},
|
||||
{
|
||||
label: 'Allow Image Uploads',
|
||||
name: 'allowImageUploads',
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Only works with claude-3-* models when image is being uploaded from chat. Compatible with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
|
||||
default: false,
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listModels(): Promise<INodeOptionsValue[]> {
|
||||
return await getModels(MODEL_TYPE.CHAT, 'awsChatBedrock')
|
||||
},
|
||||
async listRegions(): Promise<INodeOptionsValue[]> {
|
||||
return await getRegions(MODEL_TYPE.CHAT, 'awsChatBedrock')
|
||||
}
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const iRegion = nodeData.inputs?.region as string
|
||||
const iModel = nodeData.inputs?.model as string
|
||||
|
|
@ -170,7 +145,16 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
}
|
||||
if (cache) obj.cache = cache
|
||||
|
||||
const amazonBedrock = new BedrockChat(obj)
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
|
||||
const multiModalOption: IMultiModalOption = {
|
||||
image: {
|
||||
allowImageUploads: allowImageUploads ?? false
|
||||
}
|
||||
}
|
||||
|
||||
const amazonBedrock = new BedrockChat(nodeData.id, obj)
|
||||
if (obj.model.includes('anthropic.claude-3')) amazonBedrock.setMultiModalOption(multiModalOption)
|
||||
return amazonBedrock
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,34 @@
|
|||
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
|
||||
import { BedrockChat as LCBedrockChat } from '@langchain/community/chat_models/bedrock'
|
||||
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
|
||||
import { IVisionChatModal, IMultiModalOption } from '../../../src'
|
||||
|
||||
export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
|
||||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
id: string
|
||||
|
||||
constructor(id: string, fields: BaseBedrockInput & BaseChatModelParams) {
|
||||
super(fields)
|
||||
this.id = id
|
||||
this.configuredModel = fields?.model || ''
|
||||
this.configuredMaxToken = fields?.maxTokens
|
||||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
super.model = this.configuredModel
|
||||
super.maxTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
setMultiModalOption(multiModalOption: IMultiModalOption): void {
|
||||
this.multiModalOption = multiModalOption
|
||||
}
|
||||
|
||||
setVisionModel(): void {
|
||||
if (!this.model.startsWith('claude-3')) {
|
||||
super.model = 'anthropic.claude-3-haiku-20240307-v1:0'
|
||||
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,8 +1,10 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput } from '@langchain/openai'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { BaseLLMParams } from '@langchain/core/language_models/llms'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
|
||||
import { BaseCache } from 'langchain/schema'
|
||||
import { BaseLLMParams } from 'langchain/llms/base'
|
||||
import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI'
|
||||
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
|
||||
class AzureChatOpenAI_ChatModels implements INode {
|
||||
label: string
|
||||
|
|
@ -19,12 +21,12 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'Azure ChatOpenAI'
|
||||
this.name = 'azureChatOpenAI'
|
||||
this.version = 2.0
|
||||
this.version = 4.0
|
||||
this.type = 'AzureChatOpenAI'
|
||||
this.icon = 'Azure.svg'
|
||||
this.category = 'Chat Models'
|
||||
this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
|
||||
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
|
|
@ -41,27 +43,8 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
{
|
||||
label: 'Model Name',
|
||||
name: 'modelName',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'gpt-4',
|
||||
name: 'gpt-4'
|
||||
},
|
||||
{
|
||||
label: 'gpt-4-32k',
|
||||
name: 'gpt-4-32k'
|
||||
},
|
||||
{
|
||||
label: 'gpt-35-turbo',
|
||||
name: 'gpt-35-turbo'
|
||||
},
|
||||
{
|
||||
label: 'gpt-35-turbo-16k',
|
||||
name: 'gpt-35-turbo-16k'
|
||||
}
|
||||
],
|
||||
default: 'gpt-35-turbo',
|
||||
optional: true
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listModels'
|
||||
},
|
||||
{
|
||||
label: 'Temperature',
|
||||
|
|
@ -79,6 +62,14 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Top Probability',
|
||||
name: 'topP',
|
||||
type: 'number',
|
||||
step: 0.1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Frequency Penalty',
|
||||
name: 'frequencyPenalty',
|
||||
|
|
@ -102,10 +93,49 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
step: 1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Allow Image Uploads',
|
||||
name: 'allowImageUploads',
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
|
||||
default: false,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Image Resolution',
|
||||
description: 'This parameter controls the resolution in which the model views the image.',
|
||||
name: 'imageResolution',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Low',
|
||||
name: 'low'
|
||||
},
|
||||
{
|
||||
label: 'High',
|
||||
name: 'high'
|
||||
},
|
||||
{
|
||||
label: 'Auto',
|
||||
name: 'auto'
|
||||
}
|
||||
],
|
||||
default: 'low',
|
||||
optional: false,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listModels(): Promise<INodeOptionsValue[]> {
|
||||
return await getModels(MODEL_TYPE.CHAT, 'azureChatOpenAI')
|
||||
}
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const modelName = nodeData.inputs?.modelName as string
|
||||
const temperature = nodeData.inputs?.temperature as string
|
||||
|
|
@ -115,6 +145,7 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
const timeout = nodeData.inputs?.timeout as string
|
||||
const streaming = nodeData.inputs?.streaming as boolean
|
||||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
const topP = nodeData.inputs?.topP as string
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
|
||||
|
|
@ -122,6 +153,9 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
|
||||
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
|
||||
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
const imageResolution = nodeData.inputs?.imageResolution as string
|
||||
|
||||
const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
|
||||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
|
|
@ -137,8 +171,17 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
|
||||
if (timeout) obj.timeout = parseInt(timeout, 10)
|
||||
if (cache) obj.cache = cache
|
||||
if (topP) obj.topP = parseFloat(topP)
|
||||
|
||||
const model = new ChatOpenAI(obj)
|
||||
const multiModalOption: IMultiModalOption = {
|
||||
image: {
|
||||
allowImageUploads: allowImageUploads ?? false,
|
||||
imageResolution
|
||||
}
|
||||
}
|
||||
|
||||
const model = new ChatOpenAI(nodeData.id, obj)
|
||||
model.setMultiModalOption(multiModalOption)
|
||||
return model
|
||||
}
|
||||
}
|
||||
|
|
|
|||