diff --git a/en/.gitbook/assets/API Access.png b/en/.gitbook/assets/API Access.png new file mode 100644 index 0000000..a992eb5 Binary files /dev/null and b/en/.gitbook/assets/API Access.png differ diff --git a/en/.gitbook/assets/OpenAI API Key.png b/en/.gitbook/assets/OpenAI API Key.png new file mode 100644 index 0000000..59636e9 Binary files /dev/null and b/en/.gitbook/assets/OpenAI API Key.png differ diff --git a/en/.gitbook/assets/WechatIMG156.jpg b/en/.gitbook/assets/WechatIMG156.jpg new file mode 100644 index 0000000..0ae00d8 Binary files /dev/null and b/en/.gitbook/assets/WechatIMG156.jpg differ diff --git a/en/.gitbook/assets/WechatIMG157.jpg b/en/.gitbook/assets/WechatIMG157.jpg new file mode 100644 index 0000000..3bf8f26 Binary files /dev/null and b/en/.gitbook/assets/WechatIMG157.jpg differ diff --git a/en/.gitbook/assets/WechatIMG158.jpg b/en/.gitbook/assets/WechatIMG158.jpg new file mode 100644 index 0000000..43352b4 Binary files /dev/null and b/en/.gitbook/assets/WechatIMG158.jpg differ diff --git a/en/.gitbook/assets/WechatIMG160.jpg b/en/.gitbook/assets/WechatIMG160.jpg new file mode 100644 index 0000000..1acdc82 Binary files /dev/null and b/en/.gitbook/assets/WechatIMG160.jpg differ diff --git a/en/.gitbook/assets/WechatIMG38.jpg b/en/.gitbook/assets/WechatIMG38.jpg new file mode 100644 index 0000000..7aa8791 Binary files /dev/null and b/en/.gitbook/assets/WechatIMG38.jpg differ diff --git a/en/.gitbook/assets/add-new-segment.png b/en/.gitbook/assets/add-new-segment.png new file mode 100644 index 0000000..6de3b9e Binary files /dev/null and b/en/.gitbook/assets/add-new-segment.png differ diff --git a/en/.gitbook/assets/app-variables.png b/en/.gitbook/assets/app-variables.png new file mode 100644 index 0000000..ebd2964 Binary files /dev/null and b/en/.gitbook/assets/app-variables.png differ diff --git a/en/.gitbook/assets/create a new App.png b/en/.gitbook/assets/create a new App.png new file mode 100644 index 0000000..fc0cd42 Binary files /dev/null 
and b/en/.gitbook/assets/create a new App.png differ diff --git a/en/.gitbook/assets/create-app.png b/en/.gitbook/assets/create-app.png new file mode 100644 index 0000000..5a47573 Binary files /dev/null and b/en/.gitbook/assets/create-app.png differ diff --git a/en/.gitbook/assets/image (1).png b/en/.gitbook/assets/image (1).png new file mode 100644 index 0000000..0ef5f75 Binary files /dev/null and b/en/.gitbook/assets/image (1).png differ diff --git a/en/.gitbook/assets/image (10).png b/en/.gitbook/assets/image (10).png new file mode 100644 index 0000000..14e5b94 Binary files /dev/null and b/en/.gitbook/assets/image (10).png differ diff --git a/en/.gitbook/assets/image (11).png b/en/.gitbook/assets/image (11).png new file mode 100644 index 0000000..2ef8774 Binary files /dev/null and b/en/.gitbook/assets/image (11).png differ diff --git a/en/.gitbook/assets/image (12).png b/en/.gitbook/assets/image (12).png new file mode 100644 index 0000000..b912edf Binary files /dev/null and b/en/.gitbook/assets/image (12).png differ diff --git a/en/.gitbook/assets/image (13).png b/en/.gitbook/assets/image (13).png new file mode 100644 index 0000000..72ea54e Binary files /dev/null and b/en/.gitbook/assets/image (13).png differ diff --git a/en/.gitbook/assets/image (14).png b/en/.gitbook/assets/image (14).png new file mode 100644 index 0000000..f8b59b3 Binary files /dev/null and b/en/.gitbook/assets/image (14).png differ diff --git a/en/.gitbook/assets/image (15).png b/en/.gitbook/assets/image (15).png new file mode 100644 index 0000000..09de148 Binary files /dev/null and b/en/.gitbook/assets/image (15).png differ diff --git a/en/.gitbook/assets/image (16).png b/en/.gitbook/assets/image (16).png new file mode 100644 index 0000000..42309ea Binary files /dev/null and b/en/.gitbook/assets/image (16).png differ diff --git a/en/.gitbook/assets/image (17).png b/en/.gitbook/assets/image (17).png new file mode 100644 index 0000000..ec81ad7 Binary files /dev/null and 
b/en/.gitbook/assets/image (17).png differ diff --git a/en/.gitbook/assets/image (18).png b/en/.gitbook/assets/image (18).png new file mode 100644 index 0000000..e5b1aa0 Binary files /dev/null and b/en/.gitbook/assets/image (18).png differ diff --git a/en/.gitbook/assets/image (19).png b/en/.gitbook/assets/image (19).png new file mode 100644 index 0000000..381bea4 Binary files /dev/null and b/en/.gitbook/assets/image (19).png differ diff --git a/en/.gitbook/assets/image (2).png b/en/.gitbook/assets/image (2).png new file mode 100644 index 0000000..4f9324b Binary files /dev/null and b/en/.gitbook/assets/image (2).png differ diff --git a/en/.gitbook/assets/image (20).png b/en/.gitbook/assets/image (20).png new file mode 100644 index 0000000..1376e80 Binary files /dev/null and b/en/.gitbook/assets/image (20).png differ diff --git a/en/.gitbook/assets/image (21).png b/en/.gitbook/assets/image (21).png new file mode 100644 index 0000000..ee7aecf Binary files /dev/null and b/en/.gitbook/assets/image (21).png differ diff --git a/en/.gitbook/assets/image (22).png b/en/.gitbook/assets/image (22).png new file mode 100644 index 0000000..db192aa Binary files /dev/null and b/en/.gitbook/assets/image (22).png differ diff --git a/en/.gitbook/assets/image (23).png b/en/.gitbook/assets/image (23).png new file mode 100644 index 0000000..f173753 Binary files /dev/null and b/en/.gitbook/assets/image (23).png differ diff --git a/en/.gitbook/assets/image (24).png b/en/.gitbook/assets/image (24).png new file mode 100644 index 0000000..07c3064 Binary files /dev/null and b/en/.gitbook/assets/image (24).png differ diff --git a/en/.gitbook/assets/image (25).png b/en/.gitbook/assets/image (25).png new file mode 100644 index 0000000..71c5637 Binary files /dev/null and b/en/.gitbook/assets/image (25).png differ diff --git a/en/.gitbook/assets/image (26).png b/en/.gitbook/assets/image (26).png new file mode 100644 index 0000000..4bcf779 Binary files /dev/null and b/en/.gitbook/assets/image 
(26).png differ diff --git a/en/.gitbook/assets/image (27).png b/en/.gitbook/assets/image (27).png new file mode 100644 index 0000000..f090407 Binary files /dev/null and b/en/.gitbook/assets/image (27).png differ diff --git a/en/.gitbook/assets/image (28).png b/en/.gitbook/assets/image (28).png new file mode 100644 index 0000000..ec9d87f Binary files /dev/null and b/en/.gitbook/assets/image (28).png differ diff --git a/en/.gitbook/assets/image (29).png b/en/.gitbook/assets/image (29).png new file mode 100644 index 0000000..0986861 Binary files /dev/null and b/en/.gitbook/assets/image (29).png differ diff --git a/en/.gitbook/assets/image (3).png b/en/.gitbook/assets/image (3).png new file mode 100644 index 0000000..46e504d Binary files /dev/null and b/en/.gitbook/assets/image (3).png differ diff --git a/en/.gitbook/assets/image (30).png b/en/.gitbook/assets/image (30).png new file mode 100644 index 0000000..d3aa0df Binary files /dev/null and b/en/.gitbook/assets/image (30).png differ diff --git a/en/.gitbook/assets/image (31).png b/en/.gitbook/assets/image (31).png new file mode 100644 index 0000000..f0a0144 Binary files /dev/null and b/en/.gitbook/assets/image (31).png differ diff --git a/en/.gitbook/assets/image (32).png b/en/.gitbook/assets/image (32).png new file mode 100644 index 0000000..2e9429a Binary files /dev/null and b/en/.gitbook/assets/image (32).png differ diff --git a/en/.gitbook/assets/image (33).png b/en/.gitbook/assets/image (33).png new file mode 100644 index 0000000..b99288f Binary files /dev/null and b/en/.gitbook/assets/image (33).png differ diff --git a/en/.gitbook/assets/image (34).png b/en/.gitbook/assets/image (34).png new file mode 100644 index 0000000..5709ac4 Binary files /dev/null and b/en/.gitbook/assets/image (34).png differ diff --git a/en/.gitbook/assets/image (35).png b/en/.gitbook/assets/image (35).png new file mode 100644 index 0000000..0b73df0 Binary files /dev/null and b/en/.gitbook/assets/image (35).png differ diff --git 
a/en/.gitbook/assets/image (36).png b/en/.gitbook/assets/image (36).png new file mode 100644 index 0000000..5cf0555 Binary files /dev/null and b/en/.gitbook/assets/image (36).png differ diff --git a/en/.gitbook/assets/image (37).png b/en/.gitbook/assets/image (37).png new file mode 100644 index 0000000..3b96b7a Binary files /dev/null and b/en/.gitbook/assets/image (37).png differ diff --git a/en/.gitbook/assets/image (38).png b/en/.gitbook/assets/image (38).png new file mode 100644 index 0000000..430f2a3 Binary files /dev/null and b/en/.gitbook/assets/image (38).png differ diff --git a/en/.gitbook/assets/image (39).png b/en/.gitbook/assets/image (39).png new file mode 100644 index 0000000..c4aa379 Binary files /dev/null and b/en/.gitbook/assets/image (39).png differ diff --git a/en/.gitbook/assets/image (4).png b/en/.gitbook/assets/image (4).png new file mode 100644 index 0000000..f8b59b3 Binary files /dev/null and b/en/.gitbook/assets/image (4).png differ diff --git a/en/.gitbook/assets/image (40).png b/en/.gitbook/assets/image (40).png new file mode 100644 index 0000000..63dc5b2 Binary files /dev/null and b/en/.gitbook/assets/image (40).png differ diff --git a/en/.gitbook/assets/image (41).png b/en/.gitbook/assets/image (41).png new file mode 100644 index 0000000..c7c2d65 Binary files /dev/null and b/en/.gitbook/assets/image (41).png differ diff --git a/en/.gitbook/assets/image (42).png b/en/.gitbook/assets/image (42).png new file mode 100644 index 0000000..6dc2ffc Binary files /dev/null and b/en/.gitbook/assets/image (42).png differ diff --git a/en/.gitbook/assets/image (43).png b/en/.gitbook/assets/image (43).png new file mode 100644 index 0000000..732fa67 Binary files /dev/null and b/en/.gitbook/assets/image (43).png differ diff --git a/en/.gitbook/assets/image (44).png b/en/.gitbook/assets/image (44).png new file mode 100644 index 0000000..ee7aecf Binary files /dev/null and b/en/.gitbook/assets/image (44).png differ diff --git a/en/.gitbook/assets/image 
(45).png b/en/.gitbook/assets/image (45).png new file mode 100644 index 0000000..3407ccd Binary files /dev/null and b/en/.gitbook/assets/image (45).png differ diff --git a/en/.gitbook/assets/image (46).png b/en/.gitbook/assets/image (46).png new file mode 100644 index 0000000..703e9ed Binary files /dev/null and b/en/.gitbook/assets/image (46).png differ diff --git a/en/.gitbook/assets/image (47).png b/en/.gitbook/assets/image (47).png new file mode 100644 index 0000000..3abdd4e Binary files /dev/null and b/en/.gitbook/assets/image (47).png differ diff --git a/en/.gitbook/assets/image (48).png b/en/.gitbook/assets/image (48).png new file mode 100644 index 0000000..c276a61 Binary files /dev/null and b/en/.gitbook/assets/image (48).png differ diff --git a/en/.gitbook/assets/image (49).png b/en/.gitbook/assets/image (49).png new file mode 100644 index 0000000..3abdd4e Binary files /dev/null and b/en/.gitbook/assets/image (49).png differ diff --git a/en/.gitbook/assets/image (5).png b/en/.gitbook/assets/image (5).png new file mode 100644 index 0000000..2c0fa11 Binary files /dev/null and b/en/.gitbook/assets/image (5).png differ diff --git a/en/.gitbook/assets/image (50).png b/en/.gitbook/assets/image (50).png new file mode 100644 index 0000000..e3f360e Binary files /dev/null and b/en/.gitbook/assets/image (50).png differ diff --git a/en/.gitbook/assets/image (51).png b/en/.gitbook/assets/image (51).png new file mode 100644 index 0000000..8c544fb Binary files /dev/null and b/en/.gitbook/assets/image (51).png differ diff --git a/en/.gitbook/assets/image (52).png b/en/.gitbook/assets/image (52).png new file mode 100644 index 0000000..9aa1946 Binary files /dev/null and b/en/.gitbook/assets/image (52).png differ diff --git a/en/.gitbook/assets/image (53).png b/en/.gitbook/assets/image (53).png new file mode 100644 index 0000000..09c869e Binary files /dev/null and b/en/.gitbook/assets/image (53).png differ diff --git a/en/.gitbook/assets/image (54).png 
b/en/.gitbook/assets/image (54).png new file mode 100644 index 0000000..4623dee Binary files /dev/null and b/en/.gitbook/assets/image (54).png differ diff --git a/en/.gitbook/assets/image (55).png b/en/.gitbook/assets/image (55).png new file mode 100644 index 0000000..7355239 Binary files /dev/null and b/en/.gitbook/assets/image (55).png differ diff --git a/en/.gitbook/assets/image (56).png b/en/.gitbook/assets/image (56).png new file mode 100644 index 0000000..ca5aa4f Binary files /dev/null and b/en/.gitbook/assets/image (56).png differ diff --git a/en/.gitbook/assets/image (57).png b/en/.gitbook/assets/image (57).png new file mode 100644 index 0000000..15e2ac4 Binary files /dev/null and b/en/.gitbook/assets/image (57).png differ diff --git a/en/.gitbook/assets/image (59).png b/en/.gitbook/assets/image (59).png new file mode 100644 index 0000000..7b1c492 Binary files /dev/null and b/en/.gitbook/assets/image (59).png differ diff --git a/en/.gitbook/assets/image (6).png b/en/.gitbook/assets/image (6).png new file mode 100644 index 0000000..bff16da Binary files /dev/null and b/en/.gitbook/assets/image (6).png differ diff --git a/en/.gitbook/assets/image (60).png b/en/.gitbook/assets/image (60).png new file mode 100644 index 0000000..ba9147c Binary files /dev/null and b/en/.gitbook/assets/image (60).png differ diff --git a/en/.gitbook/assets/image (61).png b/en/.gitbook/assets/image (61).png new file mode 100644 index 0000000..d77c263 Binary files /dev/null and b/en/.gitbook/assets/image (61).png differ diff --git a/en/.gitbook/assets/image (62).png b/en/.gitbook/assets/image (62).png new file mode 100644 index 0000000..bc44644 Binary files /dev/null and b/en/.gitbook/assets/image (62).png differ diff --git a/en/.gitbook/assets/image (63).png b/en/.gitbook/assets/image (63).png new file mode 100644 index 0000000..2ea7d6f Binary files /dev/null and b/en/.gitbook/assets/image (63).png differ diff --git a/en/.gitbook/assets/image (64).png b/en/.gitbook/assets/image 
(64).png new file mode 100644 index 0000000..499f03a Binary files /dev/null and b/en/.gitbook/assets/image (64).png differ diff --git a/en/.gitbook/assets/image (65).png b/en/.gitbook/assets/image (65).png new file mode 100644 index 0000000..af2f828 Binary files /dev/null and b/en/.gitbook/assets/image (65).png differ diff --git a/en/.gitbook/assets/image (67).png b/en/.gitbook/assets/image (67).png new file mode 100644 index 0000000..a99d97c Binary files /dev/null and b/en/.gitbook/assets/image (67).png differ diff --git a/en/.gitbook/assets/image (68).png b/en/.gitbook/assets/image (68).png new file mode 100644 index 0000000..37d39f7 Binary files /dev/null and b/en/.gitbook/assets/image (68).png differ diff --git a/en/.gitbook/assets/image (69).png b/en/.gitbook/assets/image (69).png new file mode 100644 index 0000000..8cb4687 Binary files /dev/null and b/en/.gitbook/assets/image (69).png differ diff --git a/en/.gitbook/assets/image (7).png b/en/.gitbook/assets/image (7).png new file mode 100644 index 0000000..2406525 Binary files /dev/null and b/en/.gitbook/assets/image (7).png differ diff --git a/en/.gitbook/assets/image (70).png b/en/.gitbook/assets/image (70).png new file mode 100644 index 0000000..378c018 Binary files /dev/null and b/en/.gitbook/assets/image (70).png differ diff --git a/en/.gitbook/assets/image (72).png b/en/.gitbook/assets/image (72).png new file mode 100644 index 0000000..0ef5f75 Binary files /dev/null and b/en/.gitbook/assets/image (72).png differ diff --git a/en/.gitbook/assets/image (73).png b/en/.gitbook/assets/image (73).png new file mode 100644 index 0000000..47df9c3 Binary files /dev/null and b/en/.gitbook/assets/image (73).png differ diff --git a/en/.gitbook/assets/image (74).png b/en/.gitbook/assets/image (74).png new file mode 100644 index 0000000..378c018 Binary files /dev/null and b/en/.gitbook/assets/image (74).png differ diff --git a/en/.gitbook/assets/image (75).png b/en/.gitbook/assets/image (75).png new file mode 100644 
index 0000000..adc63ca Binary files /dev/null and b/en/.gitbook/assets/image (75).png differ diff --git a/en/.gitbook/assets/image (76).png b/en/.gitbook/assets/image (76).png new file mode 100644 index 0000000..2f6c180 Binary files /dev/null and b/en/.gitbook/assets/image (76).png differ diff --git a/en/.gitbook/assets/image (77).png b/en/.gitbook/assets/image (77).png new file mode 100644 index 0000000..16115b0 Binary files /dev/null and b/en/.gitbook/assets/image (77).png differ diff --git a/en/.gitbook/assets/image (78).png b/en/.gitbook/assets/image (78).png new file mode 100644 index 0000000..d30d2ab Binary files /dev/null and b/en/.gitbook/assets/image (78).png differ diff --git a/en/.gitbook/assets/image (79).png b/en/.gitbook/assets/image (79).png new file mode 100644 index 0000000..3d4bcca Binary files /dev/null and b/en/.gitbook/assets/image (79).png differ diff --git a/en/.gitbook/assets/image (8).png b/en/.gitbook/assets/image (8).png new file mode 100644 index 0000000..cc5a958 Binary files /dev/null and b/en/.gitbook/assets/image (8).png differ diff --git a/en/.gitbook/assets/image (80).png b/en/.gitbook/assets/image (80).png new file mode 100644 index 0000000..12f51f4 Binary files /dev/null and b/en/.gitbook/assets/image (80).png differ diff --git a/en/.gitbook/assets/image (81).png b/en/.gitbook/assets/image (81).png new file mode 100644 index 0000000..12277a9 Binary files /dev/null and b/en/.gitbook/assets/image (81).png differ diff --git a/en/.gitbook/assets/image (82).png b/en/.gitbook/assets/image (82).png new file mode 100644 index 0000000..db1690f Binary files /dev/null and b/en/.gitbook/assets/image (82).png differ diff --git a/en/.gitbook/assets/image (83).png b/en/.gitbook/assets/image (83).png new file mode 100644 index 0000000..97bdd29 Binary files /dev/null and b/en/.gitbook/assets/image (83).png differ diff --git a/en/.gitbook/assets/image (84).png b/en/.gitbook/assets/image (84).png new file mode 100644 index 0000000..d15f46e Binary 
files /dev/null and b/en/.gitbook/assets/image (84).png differ diff --git a/en/.gitbook/assets/image (9).png b/en/.gitbook/assets/image (9).png new file mode 100644 index 0000000..4623dee Binary files /dev/null and b/en/.gitbook/assets/image (9).png differ diff --git a/en/.gitbook/assets/image.png b/en/.gitbook/assets/image.png new file mode 100644 index 0000000..db1690f Binary files /dev/null and b/en/.gitbook/assets/image.png differ diff --git a/en/.gitbook/assets/notion-connect.png b/en/.gitbook/assets/notion-connect.png new file mode 100644 index 0000000..9d7175b Binary files /dev/null and b/en/.gitbook/assets/notion-connect.png differ diff --git a/en/.gitbook/assets/pre-prompt.png b/en/.gitbook/assets/pre-prompt.png new file mode 100644 index 0000000..aee298a Binary files /dev/null and b/en/.gitbook/assets/pre-prompt.png differ diff --git a/en/.gitbook/assets/screenshot-20230802-114025.png b/en/.gitbook/assets/screenshot-20230802-114025.png new file mode 100644 index 0000000..311d12a Binary files /dev/null and b/en/.gitbook/assets/screenshot-20230802-114025.png differ diff --git a/en/.gitbook/assets/screenshot-20230802-141724.png b/en/.gitbook/assets/screenshot-20230802-141724.png new file mode 100644 index 0000000..715b757 Binary files /dev/null and b/en/.gitbook/assets/screenshot-20230802-141724.png differ diff --git a/en/.gitbook/assets/screenshot-20230802-141913.png b/en/.gitbook/assets/screenshot-20230802-141913.png new file mode 100644 index 0000000..0db0705 Binary files /dev/null and b/en/.gitbook/assets/screenshot-20230802-141913.png differ diff --git a/en/.gitbook/assets/screenshot-20230802-142407.png b/en/.gitbook/assets/screenshot-20230802-142407.png new file mode 100644 index 0000000..3276d98 Binary files /dev/null and b/en/.gitbook/assets/screenshot-20230802-142407.png differ diff --git a/en/.gitbook/assets/screenshot-20230802-145326.png b/en/.gitbook/assets/screenshot-20230802-145326.png new file mode 100644 index 0000000..63250ec Binary files 
/dev/null and b/en/.gitbook/assets/screenshot-20230802-145326.png differ diff --git a/en/.gitbook/assets/segment-list.png b/en/.gitbook/assets/segment-list.png new file mode 100644 index 0000000..68f8cc8 Binary files /dev/null and b/en/.gitbook/assets/segment-list.png differ diff --git a/en/.gitbook/assets/share your App.png b/en/.gitbook/assets/share your App.png new file mode 100644 index 0000000..627a54d Binary files /dev/null and b/en/.gitbook/assets/share your App.png differ diff --git a/en/.gitbook/assets/sync-notion-data.png b/en/.gitbook/assets/sync-notion-data.png new file mode 100644 index 0000000..0f0b035 Binary files /dev/null and b/en/.gitbook/assets/sync-notion-data.png differ diff --git a/en/README.md b/en/README.md new file mode 100644 index 0000000..e42f48e --- /dev/null +++ b/en/README.md @@ -0,0 +1,64 @@ +--- +description: >- + The name "Dify" is derived from the two words "Define" and "Modify". It + represents the vision to help developers continuously improve their AI + applications. "Dify" can be understood as "Do it for you" +--- + +# Welcome to Dify! + +{% hint style="info" %} +Tips: Dify is currently in the beta preview stage. If there are any inconsistencies between the documentation and the product, please refer to the actual product experience. +{% endhint %} + +If you are amazed and excited by the rapid development of LLM technologies such as GPT-4 and can't wait to use them for something useful! But you have all these confusing questions in your mind: + +* How do I "train" a model based on my content? +* How do I let AI know about things that happened after 2021? +* How do I prevent AI from babbling nonsense with users? +* What do fine-tuning and embedding mean? + +Well, Dify is just what you need. + +**Dify aims to enable developers (and even non-developers) to quickly build useful applications based on large language models, ensuring they are visual, operable, and improvable.** + +> "We shape our tools, and then our tools shape us." 
- Marshall McLuhan + +You can quickly build a Web App using Dify, and the generated frontend code can be hosted on Dify. If you want to develop further based on this Web App, you can obtain these templates from GitHub and deploy them anywhere (e.g., Vercel or your server). Alternatively, you can develop your own Web frontend, mobile App, etc., based on the WebAPI, saving you backend development work. + +Moreover, the core concept of Dify is to create, configure, and improve your application in a visual interface. Application development based on LLM has a continuous improvement lifecycle, and you may need to make AI give correct answers based on your content, improve AI's accuracy and narrative style, or even download a subtitle from YouTube as context. + +This process will involve some logic design, context enhancement, data preparation, and other efforts that may be challenging without the right tools... We call this process LLMOps. + +### Next Steps + +* Check out these applications created with Dify +* Quickly create applications in the cloud +* Install Dify on your server + +> "Only a few companies will have the budget to build and manage large language models (LLM) like GPT-3, but there will be many billion-dollar 'second layer' companies that emerge over the next decade."———Sam Altman + +Just as the LLM technology is rapidly evolving, Dify is a constantly improving product, and there may be some discrepancies between the content of this document and the actual product. You can share your thoughts with us on [GitHub](https://github.com/langgenius) or Discord. + +### Q\&A + +**Q: What can I do with Dify?** +A: Dify is a simple yet powerful natural language programming tool. You can use it to build commercial-grade applications, personal assistants. 
If you want to develop applications yourself, Dify can also save you the backend work of accessing OpenAI, and with the increasingly powerful visual operation capabilities we provide, you can continuously improve and train your GPT model. + +**Q: How do I use Dify to train my own models?** +A: A valuable application consists of Prompt Engineering, Context Enhancement and Fine-tuning. We have created a hybrid programming method that combines prompts and programming languages (similar to a template engine). You can easily complete long text embedding or grab the subtitles of a YouTube video entered by the user - these will be used as context submitted to LLMs for calculation. We pay great attention to the operability of the application. The data generated by your users during the use of the App can be analyzed, labeled and continuously trained. The above steps may consume a lot of your time without good tool support. + +**Q: What do I need to prepare to create my own application?** +A: You choose a model provider such as OpenAI. Our cloud version has a built-in trial model of GPT-4. You can fill in your own API key. Then you can create an app based on prompts or your own context. + +**Q: Can applications built with Dify maintain conversations?** +A: Yes, if you create a conversational application, it has built-in session saving capabilities, supported in both generated web apps and APIs. + +**Q: What's the difference between LLMOps and MLOps?** +A: In the past, MLOps allowed developers to train models from scratch, while LLMOps developed AI-native applications based on powerful models such as GPT-4. You can refer to this article. + +**Q: What interface languages are provided?** +A: English and Chinese are currently supported. You can contribute language packs to us. + +**Q: What is LangGenius?** +A: LangGenius was the product name before Dify's official launch. We are still updating all the documentation. The name "Dify" is derived from the two words "Define" and "Modify". 
It represents the vision to help developers continuously improve their AI applications. "Dify" can be understood as "Do it for you". diff --git a/en/SUMMARY.md b/en/SUMMARY.md new file mode 100644 index 0000000..1259622 --- /dev/null +++ b/en/SUMMARY.md @@ -0,0 +1,65 @@ +# Table of contents + +## Getting Started + +* [Welcome to Dify!](README.md) +* [Cloud](getting-started/cloud.md) +* [Install(Self hosted)](getting-started/install-self-hosted/README.md) + * [Docker Compose Deployment](getting-started/install-self-hosted/docker-compose.md) + * [Local Source Code Start](getting-started/install-self-hosted/local-source-code.md) + * [Start the frontend Docker container separately](getting-started/install-self-hosted/start-the-frontend-docker-container.md) + * [Environments](getting-started/install-self-hosted/environments.md) +* [What is LLMOps?](getting-started/what-is-llmops.md) +* [FAQ](getting-started/faq/README.md) + * [Install FAQ](getting-started/faq/install-faq.md) + * [LLMs-use-FAQ](getting-started/faq/llms-use-faq.md) + * [API-use-FAQ](getting-started/faq/api-use-faq.md) + +## Application + +* [Creating An Application](application/creating-an-application.md) +* [Launch the WebApp quickly](application/launch-webapp.md) +* [Prompt Engineering](application/prompt-engineering/README.md) + * [Text Generator](application/prompt-engineering/text-generation-application.md) + * [Conversation Application](application/prompt-engineering/conversation-application.md) +* [Developing with APIs](application/developing-with-apis.md) +* [Logs & Annotations](application/logs.md) + +## web application + +* [Overview](web-application/overview.md) +* [Text Generator](web-application/text-generator.md) +* [Conversation Application](web-application/conversation-application.md) + +## Explore + +* [Chat](explore/chat.md) + +## Advanced + +* [Datasets\&Index](advanced/datasets/README.md) + * [Sync from Notion](advanced/datasets/sync-from-notion.md) + * [Dataset of QA 
model](advanced/datasets/dataset-of-qa-model.md) +* [Plugins](advanced/ai-plugins.md) +* [Based on WebApp Template](advanced/based-on-frontend-templates.md) +* [Model Configuration](advanced/model-configuration/README.md) + * [Hugging Face](advanced/model-configuration/hugging-face.md) + * [Replicate](advanced/model-configuration/replicate.md) +* [More Integration](advanced/more-integration.md) + +## use cases + +* [How to Build a Notion AI Assistant Based on Your Own Notes?](use-cases/build-an-notion-ai-assistant.md) +* [Create an AI ChatBot with Business Data in Minutes](use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md) +* [Create a Midjourney Prompt Bot Without Code in Just a Few Minutes](use-cases/create-a-midjoureny-prompt-bot-with-dify.md) + +## Community + +* [Support](community/support.md) +* [Open-Source License](community/open-source.md) +* [Data Security](community/data-security.md) + +## User Agreement + +* [Terms of Service](user-agreement/terms-of-service.md) +* [Privacy Policy](user-agreement/privacy-policy.md) diff --git a/en/advanced/ai-plugins.md b/en/advanced/ai-plugins.md new file mode 100644 index 0000000..9f5186a --- /dev/null +++ b/en/advanced/ai-plugins.md @@ -0,0 +1,5 @@ +# Plugins + +{% hint style="info" %} +Plugins are an upcoming feature of Dify. You can incorporate plugins into your App orchestration and access AI applications with plugin capabilities through an API or WebApp. Dify is compatible with the ChatGPT Plugins standard and provides some native plugins. +{% endhint %} diff --git a/en/advanced/based-on-frontend-templates.md b/en/advanced/based-on-frontend-templates.md new file mode 100644 index 0000000..2a412b7 --- /dev/null +++ b/en/advanced/based-on-frontend-templates.md @@ -0,0 +1,39 @@ +# Based on WebApp Template + +If you are developing a new product from scratch or are in the product prototype design phase, you can quickly launch an AI site using Dify. 
At the same time, Dify hopes that developers can fully freely create different forms of front-end applications. For this reason, we provide: + +* **SDK** for quick access to the Dify API in various languages +* **WebApp Template** for WebApp development scaffolding for each type of application + +The WebApp Templates are open source under the MIT license. You are free to modify and deploy them to achieve all the capabilities of Dify or as a reference code for implementing your own App. + +You can find these Templates on GitHub: + +* [Conversational app](https://github.com/langgenius/webapp-conversation) +* [Text generation app](https://github.com/langgenius/webapp-text-generator) + +The fastest way to use the WebApp Template is to click "**Use this template**" on GitHub, which is equivalent to forking a new repository. Then you need to configure the Dify App ID and API Key, like this: + +```javascript +export const APP_ID = '' +export const API_KEY = '' +``` + +More config in `config/index.ts`: + +``` +export const APP_INFO: AppInfo = { + "title": 'Chat APP', + "description": '', + "copyright": '', + "privacy_policy": '', + "default_language": 'zh-Hans' +} + +export const isShowPrompt = true +export const promptTemplate = '' +``` + +Each WebApp Template provides a README file containing deployment instructions. Usually, WebApp Templates contain a lightweight backend service to ensure that developers' API keys are not directly exposed to users. + +These WebApp Templates can help you quickly build prototypes of AI applications and use all the capabilities of Dify. If you develop your own applications or new templates based on them, feel free to share with us. 
diff --git a/en/advanced/datasets/README.md b/en/advanced/datasets/README.md new file mode 100644 index 0000000..446ca3b --- /dev/null +++ b/en/advanced/datasets/README.md @@ -0,0 +1,137 @@ +# Datasets\&Index + +Most language models use outdated training data and have length limitations for the context of each request. For example, GPT-3.5 is trained on corpora from 2021 and has a limit of approximately 4k tokens per request. This means that developers who want their AI applications to be based on the latest and private context conversations must use techniques like embedding. + +Dify's dataset feature allows developers (and even non-technical users) to easily manage datasets and automatically integrate them into AI applications. All you need to do is prepare text content, such as: + +* Long text content (TXT, Markdown, JSONL, or even PDF files) +* Structured data (CSV, Excel, etc.) + +Additionally, we are gradually supporting syncing data from various data sources to datasets, including: + +* GitHub +* Databases +* Webpages +* ... + +{% hint style="info" %} +**Practice**: If your company wants to build an AI customer service assistant based on existing knowledge bases and product documentation, you can upload the documents to a dataset in Dify and create a conversational application. This might have taken you several weeks in the past and been difficult to maintain continuously. +{% endhint %} + +### Datasets and Documents + +In Dify, datasets (Datasets) are collections of documents (Documents). A dataset can be integrated as a whole into an application to be used as context. Documents can be uploaded by developers or operations staff, or synced from other data sources (typically corresponding to a file unit in the data source). + +**Steps to upload a document:** + +1. Upload your file, usually a long text file or a spreadsheet +2. Segment, clean, and preview +3. Dify submits it to the LLM provider for embedding as vector data and storage +4. 
Set metadata for the document +5. Ready to use in the application! + +#### How to write a good dataset description + +When multiple datasets are referenced in an application, AI uses the description of the datasets and the user's question to determine which dataset to use to answer the user's question. Therefore, a well-written dataset description can improve the accuracy of AI in selecting datasets. + +The key to writing a good dataset description is to clearly describe the content and characteristics of the dataset. **It is recommended that the dataset description begin with this: `Useful only when the question you want to answer is about the following: specific description`**. Here is an example of a real estate dataset description: + +> Useful only when the question you want to answer is about the following: global real estate market data from 2010 to 2020. This data includes information such as the average housing price, property sales volume, and housing types for each city. In addition, this dataset also includes some economic indicators such as GDP and unemployment rate, as well as some social indicators such as population and education level. These indicators can help analyze the trends and influencing factors of the real estate market. +> With this data, we can understand the development trends of the global real estate market, analyze the changes in housing prices in various cities, and understand the impact of economic and social factors on the real estate market. + +### Create a dataset + +1. Click on datasets in the main navigation bar of Dify. On this page, you can see the existing datasets. Click on "Create Dataset" to enter the creation wizard. +2. If you have already prepared your files, you can start by uploading the files. +3. If you haven't prepared your documents yet, you can create an empty dataset first. + +### Uploading Documents by File Upload + +1. Select the file you want to upload. We support batch uploads +2. Preview the full text +3. 
Perform segmentation and cleaning +4. Wait for Dify to process the data for you; this step usually consumes tokens in the LLM provider + +### Text Preprocessing and Cleaning + +Text Preprocessing and cleaning refers to Dify automatically segmenting and vectorizing your data documents so that users' questions (input) can match relevant paragraphs (Q to P), and generate results. + +When uploading a dataset, you need to select an **indexing mode** to specify how data is matched. This affects the accuracy of AI replies. + +In **High Quality mode**, OpenAI's embedding API is used for higher accuracy in user queries. + +In **Economic mode**, offline vector engines, keyword indexing etc. are used to reduce costs at the expense of lower accuracy. + +In **Segmenting in Question & Answer format**, instead of normal "Q to P" (question matches paragraphs), it uses "Q to Q" (question matches question) matching. After segmentation, Q\&A pairs are generated for each passage. When users ask questions, the system finds the most similar question and returns the corresponding passage as the answer. This is more precise because it directly matches the user's question and retrieves the information they need. + +> Questions have complete syntax while keywords lack semantics and context. So Q to Q improves clarity and handles similar high-frequency questions better. + +

In Segmenting in Question & Answer format, the text is summarized into multiple QA pairs

+ +

The difference between Q to P and Q to Q indexing modes

+ +### Modify Documents + +For technical reasons, if developers make the following changes to documents, Dify will create a new document for you, and the old document will be archived and deactivated: + +1. Adjust segmentation and cleaning settings +2. Re-upload the file + +Dify supports customizing the segmented and cleaned text by adding, deleting, and editing paragraphs. You can dynamically adjust your segmentation to make your dataset more accurate. Click **Document --> paragraph --> Edit** in the dataset to modify paragraph content. Click **Document --> paragraph --> Add new segment** to manually add a new paragraph. + + + +

Add new segment

+ +### Maintain Datasets via API + +TODO + +### Dataset Settings + +Click **Settings** in the left navigation of the dataset. You can change the following settings for the dataset: + +* Dataset **name** for identifying a dataset +* Dataset **description** to allow AI to better use the dataset appropriately. If the description is empty, Dify's automatic indexing strategy will be used. +* **Permissions** can be set to Only Me or All Team Members. Those without permissions cannot view and edit the dataset. +* **Indexing mode**: In High Quality mode, OpenAI's embedding interface will be called to process and provide higher accuracy when users query. In Economic mode, offline vector engines, keyword indexing, etc. will be used to reduce costs without consuming tokens, at the expense of lower accuracy. + +Note: Upgrading the indexing mode from Economic to High Quality will incur additional token consumption. Downgrading from High Quality to Economic will not consume tokens. + +### Integrate into Applications + +Once the dataset is ready, it needs to be integrated into the application. When the AI application processes a user query, it will automatically use the associated dataset content as a reference context. + +1. Go to the application - Prompt Arrangement page +2. In the context options, select the dataset you want to integrate +3. Save the settings to complete the integration + +### Q\&A + +**Q: What should I do if the PDF upload is garbled?** + +A: If your PDF parsing appears garbled under certain formatted contents, you could consider converting the PDF to Markdown format, which currently offers higher accuracy, or you could reduce the use of images, tables, and other formatted content in the PDF. We are researching ways to optimize the experience of using PDFs. + +**Q: How does the consumption mechanism of context work?** +A: With a dataset added, each query will consume segmented content (currently embedding two segments) + question + prompt + chat history combined. 
However, it will not exceed model limitations, such as 4096. + +**Q: Where does the embedded dataset appear when asking questions?** +A: It will be embedded as context before the question. + +**Q: Is there any priority between the added dataset and OpenAI's answers?** +A: The dataset serves as context and is used together with questions for LLM to understand and answer; there is no priority relationship. + +**Q: Why can I hit in test but not in application?** +A: You can troubleshoot issues by following these steps: + +1. Make sure you have added text on the prompt page and clicked on the save button in the top right corner. +2. Test whether it responds normally in the prompt debugging interface. +3. Try again in a new WebApp session window. +4. Optimize your data format and quality. For practice reference, visit [https://github.com/langgenius/dify/issues/90](https://github.com/langgenius/dify/issues/90) + If none of these steps solve your problem, please join our community for help. + +**Q: Will APIs related to hit testing be opened up so that dify can access knowledge bases and implement dialogue generation using custom models?** +A: We plan to open up Webhooks later on; however, there are no current plans for this feature. You can achieve your requirements by connecting to any vector database. + +**Q: How do I add multiple datasets?** +A: Due to short-term performance considerations, we currently only support one dataset. If you have multiple sets of data, you can upload them within the same dataset for use. diff --git a/en/advanced/datasets/dataset-of-qa-model.md b/en/advanced/datasets/dataset-of-qa-model.md new file mode 100644 index 0000000..2968110 --- /dev/null +++ b/en/advanced/datasets/dataset-of-qa-model.md @@ -0,0 +1,21 @@ +--- +description: QAmodeldataset +--- + +# Dataset of QA model + +**The Q\&A paragraph mode feature is different from the normal "Q2P" (question matches paragraph content) matching mode. 
The "Q2Q" (question matches question) matching mode means that when a user asks a question, the system will find the most similar question to it, and then return the corresponding paragraph as the answer. This method is more precise, because it directly matches the user's question, and can more accurately obtain the information that the user really needs.** + +
+ +1. QA dataset is created by summarizing each paragraph in a document and generating QA pairs from the summaries. This process summarizes the information in each paragraph and breaks it down to extract valuable insights for the user. + +
+ + + +
+2. At the same time, we support custom additions and modifications to the segmentation. Users can dynamically adjust their own segmentation information to make your dataset more precise. + + ![](<../../.gitbook/assets/image (68).png>)![](<../../.gitbook/assets/image (69).png>) +3. The problem text has a complete syntactic structure of natural language, rather than some keywords in a document retrieval task. Therefore, matching Q to Q makes the semantics and matching clearer while satisfying some high-frequency and high-similarity question scenarios. diff --git a/en/advanced/datasets/sync-from-notion.md b/en/advanced/datasets/sync-from-notion.md new file mode 100644 index 0000000..a28a0f4 --- /dev/null +++ b/en/advanced/datasets/sync-from-notion.md @@ -0,0 +1,26 @@ +# Sync from Notion + +Dify dataset supports importing from Notion and setting up **Sync** so that data is automatically synced to Dify after updates in Notion. + +### Authorization verification + +1. When creating a dataset, select the data source, click **Sync from Notion--Go to connect**, and complete the authorization verification according to the prompt. +2. You can also: click **Settings--Data Sources--Add a Data Source**, click Notion Source **Connect** to complete authorization verification. + +

Connect Notion

+ +### Import Notion data + +After completing authorization verification, go to the dataset creation page, click **Sync from Notion**, and select the required authorization page to import. + +### Segmentation and cleaning + +Next, select your **segmentation settings** and **indexing method**, **save and process**. Wait for Dify to process this data, usually this step requires token consumption in LLM providers. Dify not only supports importing ordinary page types but also summarizes and saves the page attributes under the database type. + +_**Note: Images and files are not currently supported for import. Table data will be converted to text.**_ + +### Sync Notion data + +If your Notion content has been modified, you can click Sync directly on the Dify dataset document list page to sync the data with one click(Please note that each time you click, the current content will be synchronized). This step requires token consumption. + +

Sync Notion data

diff --git a/en/advanced/model-configuration/README.md b/en/advanced/model-configuration/README.md new file mode 100644 index 0000000..6b383aa --- /dev/null +++ b/en/advanced/model-configuration/README.md @@ -0,0 +1,83 @@ +# Model Configuration + +Dify currently supports major model providers such as OpenAI's GPT series. Here are the model providers we currently support: + +* OpenAI +* Azure OpenAI Service +* Anthropic +* Hugging Face Hub +* Replicate +* iFLYTEK SPARK +* WENXINYIYAN +* TONGYI +* MINIMAX +* ChatGLM + +Based on technology developments and user needs, we will continue adding support for more LLM providers over time. + +### Trial Hosted Models + +We provide trial quotas for different models for Dify cloud service users. Please set up your own model provider before the trial quota runs out, otherwise it may impact normal use of your application. + +* **OpenAI hosted model trial:** We provide 500 free call credits for you to try out GPT3.5-turbo, GPT3.5-turbo-16k, text-davinci-003 models. +* **Anthropic Claude hosted model trial:** We provide 1000 free call credits for you to try out Claude-instant-1, Claude2 models. + +### Model type + +In Dify, we divide models into the following 3 categories according to their usage scenarios: + +1. System Reasoning Model. In the created application, this type of model is used. Smart chat, dialogue name generation, and next question suggestions also use reasoning models. +2. Embedding Model. In the dataset, this type of model is used to embedding segmented documents. In applications that use data sets, this type of model is also used to process user questions as Embedding. +3. Speech-to-Text model. In conversational applications, this type of model is used to convert speech to text. + +### Set default model + +When Dify needs a model, it will select the set default model according to the usage scenario. Set the default model in `Settings > Model Provider`. + +
+ +### Access model settings + +Set the model to be imported in Dify's `Settings > Model Provider`. + +![](<../../.gitbook/assets/image (83).png>) + +There are two types of model suppliers: + +1. Own model. Model suppliers of this type provide models developed by themselves. Such as OpenAI, Anthropic, etc. +2. Hosting model. This type of model provider provides third-party models. Such as Hugging face, Replicate, etc. + +The different types of model suppliers are accessed slightly differently in Dify. + + + +### Model suppliers that access their own models + +After importing the supplier of its own model, Dify will automatically import all the models under the supplier. + +Set the API key of the corresponding model provider in Dify to access the model provider. Get the API address of the model provider as follows: + +* OpenAI: [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys) +* Anthropic:[https://console.anthropic.com/account/keys](https://console.anthropic.com/account/keys) +* iFLYTEK SPARK:[https://www.xfyun.cn/solutions/xinghuoAPI](https://www.xfyun.cn/solutions/xinghuoAPI) +* MINIMAX:[https://api.minimax.chat/user-center/basic-information/interface-key](https://api.minimax.chat/user-center/basic-information/interface-key) +* WENXINYIYAN:[https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application](https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application) +* TONGYI:[https://dashscope.console.aliyun.com/api-key_management?spm=a2c4g.11186623.0.0.3bbc424dxZms9k](https://dashscope.console.aliyun.com/api-key_management?spm=a2c4g.11186623.0.0.3bbc424dxZms9k) +* ChatGLM: This model provider does not provide official services. But self-deployment is supported ([deployment docs](https://github.com/THUDM/ChatGLM2-6B/blob/main/README_EN.md#environment-setup)). 
+ +{% hint style="info" %} +Dify uses [PKCS1_OAEP](https://pycryptodome.readthedocs.io/en/latest/src/cipher/oaep.html) to encrypt and store user-managed API keys, and each tenant uses an independent key pair for encryption to ensure that your API keys are not leaked. +{% endhint %} + +### Model suppliers that access hosted models + +There are many third-party models on hosting type providers. Access models need to be added one by one. The specific access method is as follows: + +* [Hugging Face](hugging-face.md). +* [Replicate](replicate.md). + +### Use model + +Once you have configured your models, you can use them in your application: + +
diff --git a/en/advanced/model-configuration/hugging-face.md b/en/advanced/model-configuration/hugging-face.md new file mode 100644 index 0000000..8edd10d --- /dev/null +++ b/en/advanced/model-configuration/hugging-face.md @@ -0,0 +1,64 @@ +# Hugging Face + +Dify supports models of the [text-generation](https://huggingface.co/models?pipeline_tag=text-generation\&sort=trending) and [text2text-generation](https://huggingface.co/models?pipeline_tag=text2text-generation\&sort=trending) types on the Hugging Face. Specific steps are as follows: + +1. You need a Hugging Face account ([registered address](https://huggingface.co/join)). +2. Set the API key of Hugging Face ([obtain address](https://huggingface.co/settings/tokens)). +3. Select a model to enter the [Hugging Face model list page](https://huggingface.co/models?pipeline_tag=text-generation\&sort=trending), and filter the models with [text-generation](https://huggingface.co/models?pipeline_tag=text-generation\&sort=trending) and [text2text-generation](https://huggingface.co/models?pipeline_tag=text2text-generation\&sort=trending). + +
+ +Dify supports accessing models on Hugging Face in two ways: + +1. Hosted Inference API. This method uses the model officially deployed by Hugging Face. No fee is required. But the downside is that only a small number of models support this approach. +2. Inference Endpoint. This method uses cloud resources such as AWS, accessed through Hugging Face, to deploy the model and requires payment. + +### Models that access the Hosted Inference API + +#### 1 Select a model + +Hosted inference API is supported only when there is an area containing Hosted inference API on the right side of the model details page. As shown in the figure below: + +
+ +On the model details page, you can get the name of the model. + +
+ +#### 2 Using access models in Dify + +Select Hosted Inference API for Endpoint Type in `Settings > Model Provider > Hugging Face`. As shown below: + +
+ +API Token is the API Key set at the beginning of the article. The model name is the model name obtained in the previous step. + + + +### Method 2: Inference Endpoint + +#### 1 Select the model to deploy + +Inference Endpoint is only supported for models with the Inference Endpoints option under the Deploy button on the right side of the model details page. As shown below: + +
+ + + +#### 2 Deploy the model + +Click the Deploy button for the model and select the Inference Endpoint option. If you have not bound a bank card before, you will need to bind the card. Just follow the process. After binding the card, the following interface will appear: modify the configuration according to the requirements, and click Create Endpoint in the lower left corner to create an Inference Endpoint. + +
+ +After the model is deployed, you can see the Endpoint URL. + +
+ +#### 3 Using access models in Dify + +Select Inference Endpoints for Endpoint Type in `Settings > Model Provider > Hugging face`. As shown below: + +
+ +API Token is the API Key set at the beginning of the article. The name of the model is arbitrary. Endpoint URL is the Endpoint URL obtained after successfully deploying the model in the previous step. diff --git a/en/advanced/model-configuration/replicate.md b/en/advanced/model-configuration/replicate.md new file mode 100644 index 0000000..2af1e9c --- /dev/null +++ b/en/advanced/model-configuration/replicate.md @@ -0,0 +1,18 @@ +# Replicate + +Dify supports accessing [Language models](https://replicate.com/collections/language-models) and [Embedding models](https://replicate.com/collections/embedding-models) on Replicate. Language models correspond to Dify's reasoning model, and Embedding models correspond to Dify's Embedding model. + + + +Specific steps are as follows: + +1. You need to have a Replicate account ([registered address](https://replicate.com/signin?next=/docs)). +2. Get API Key ([get address](https://replicate.com/signin?next=/docs)). +3. Pick a model. Select the model under [Language models](https://replicate.com/collections/language-models) and [Embedding models](https://replicate.com/collections/embedding-models) . +4. Add models in Dify's `Settings > Model Provider > Replicate`. + +
+ +The API key is the API Key set in step 2. Model Name and Model Version can be found on the model details page: + +
diff --git a/en/advanced/more-integration.md b/en/advanced/more-integration.md new file mode 100644 index 0000000..d89ce30 --- /dev/null +++ b/en/advanced/more-integration.md @@ -0,0 +1,3 @@ +# More Integration + +TODO diff --git a/en/application/creating-an-application.md b/en/application/creating-an-application.md new file mode 100644 index 0000000..e8214f2 --- /dev/null +++ b/en/application/creating-an-application.md @@ -0,0 +1,53 @@ +# Creating An Application + +In Dify, an "application" refers to a real-world scenario application built on large language models such as GPT. By creating an application, you can apply intelligent AI technology to specific needs. It encompasses both the engineering paradigms for developing AI applications and the specific deliverables. + +**In short, an application delivers to developers:** + +* A user-friendly, encapsulated LLM API that can be called directly by backend or frontend applications with token authentication +* A ready-to-use, beautiful, and hosted Web App that you can develop further using the Web App templates +* A set of easy-to-use interfaces for Prompt Engineering, context management, log analysis, and annotation + +You can choose one or all of them to support your AI application development. + +### Application Types + +Dify offers two types of applications: text generation and conversational. More application paradigms may appear in the future (we should keep up-to-date), and the ultimate goal of Dify is to cover more than 80% of typical LLM application scenarios. The differences between text generation and conversational applications are shown in the table below: + +
Text GenerationConversational
WebApp InterfaceForm + ResultsChat style
API Endpointcompletion-messageschat-messages
Interaction ModeOne question and one answerMulti-turn dialogue
Streaming results returnSupportedSupported
Context PreservationCurrent timeContinuous
User input formSupportedSupported
Datasets&PluginsSupportedSupported
AI opening remarksNot supportedSupported
Scenario exampleTranslation, judgment, indexingChat or everything
+ +### Steps to Create an Application + +After logging in as an administrator in Dify, go to the main navigation application page Click "Create New Application" Choose a conversational or text generation application and give it a name (modifiable later) + +

Create a new App

+ +We provide some templates in the application creation interface, and you can click to create from a template in the popup when creating an application. These templates will provide inspiration and reference for the application you want to develop. + +### Creating from a Configuration File + +If you have obtained a template from the community or someone else, you can click to create from an application configuration file. Uploading the file will load most of the settings from the other party's application (but not the datasets at present). + +### Your Application + +If you are using it for the first time, you will be prompted to enter your OpenAI API key. A properly functioning LLM key is a prerequisite for using Dify. If you don't have one yet, please apply for one. + +

Enter your OpenAI API Key

+ +After creating an application or selecting an existing one, you will arrive at an application overview page showing the application's profile. You can directly access your WebApp or check the API status here, as well as enable or disable them. + +Statistics show the usage, active user count, and LLM call consumption of the application over a period of time—enabling you to continually improve the cost-effectiveness of application operations. We will gradually provide more useful visualization capabilities; please let us know what you want. + +1. Total Messages: Daily AI interactions count; prompt engineering/debugging excluded. +2. Active Users: Unique users engaging in Q\&A with AI; prompt engineering/debugging excluded. +3. Avg. Session Interactions: Continuous user-AI communication count; for conversation-based apps. +4. User Satisfaction Rate: Likes per 1,000 messages; indicates satisfaction with AI answers. +5. Avg. Response Time: Time (ms) for AI to process/respond; for text-based apps. +6. Token Usage: Daily language model token usage; for cost control. + +### What's Next + +* Try your WebApp +* Take a tour of the Configuration, Development, and Logs pages on the left +* Try configuring an application using a reference case +* If you have the ability to develop frontend applications, please consult the API documentation diff --git a/en/application/developing-with-apis.md b/en/application/developing-with-apis.md new file mode 100644 index 0000000..e9b3561 --- /dev/null +++ b/en/application/developing-with-apis.md @@ -0,0 +1,69 @@ +# Developing with APIs + +Dify offers a "Backend-as-a-Service" API, providing numerous benefits to AI application developers. This approach enables developers to access the powerful capabilities of large language models (LLMs) directly in frontend applications without the complexities of backend architecture and deployment processes. 
+ +### Benefits of using Dify API + +* Allow frontend apps to securely access LLM capabilities without backend development +* Design applications visually with real-time updates across all clients +* Well-encapsulated original LLM APIs +* Effortlessly switch between LLM providers and centrally manage API keys +* Operate applications visually, including log analysis, annotation, and user activity observation +* Continuously provide more tools, plugins, and datasets + +### How to use + +Choose an application, and find the API Access in the left-side navigation of the Apps section. On this page, you can view the API documentation provided by Dify and manage credentials for accessing the API. + +

API document

+ +You can create multiple access credentials for an application to deliver to different users or developers. This means that API users can use the AI capabilities provided by the application developer, but the underlying Prompt engineering, datasets, and tool capabilities are encapsulated. + +{% hint style="warning" %} +In best practices, API keys should be called through the backend, rather than being directly exposed in plaintext within frontend code or requests. This helps prevent your application from being abused or attacked. +{% endhint %} + +For example, if you're a developer in a consulting company, you can offer AI capabilities based on the company's private database to end-users or developers, without exposing your data and AI logic design. This ensures a secure and sustainable service delivery that meets business objectives. + +### Text-generation application + +These applications are used to generate high-quality text, such as articles, summaries, translations, etc., by calling the completion-messages API and sending user input to obtain generated text results. The model parameters and prompt templates used for generating text depend on the developer's settings in the Dify Prompt Arrangement page. + +You can find the API documentation and example requests for this application in **Applications -> Access API**. + +For example, here is a sample call an API for text generation: + +``` +curl --location --request POST 'https://api.dify.dev/v1/completion-messages' \ +--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "inputs": {}, + "query": "Hi", + "response_mode": "streaming", + "user": "abc-123" +}' +``` + + + +### Conversational applications + +Suitable for most scenarios, conversational applications engage in continuous dialogue with users in a question-and-answer format. 
To start a conversation, call the chat-messages API and maintain the session by continuously passing in the returned conversation_id. + +You can find the API documentation and example requests for this application in **Applications -> Access API**. + +For example, here is a sample API call for chat-messages: + +``` +curl --location --request POST 'https://api.dify.dev/v1/chat-messages' \ +--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "inputs": {}, + "query": "eh", + "response_mode": "streaming", + "conversation_id": "1c7e55fb-1ba2-4e10-81b5-30addcea2276", + "user": "abc-123" +}' +``` diff --git a/en/application/launch-webapp.md b/en/application/launch-webapp.md new file mode 100644 index 0000000..3a9e342 --- /dev/null +++ b/en/application/launch-webapp.md @@ -0,0 +1,46 @@ +# Launch the WebApp quickly + +One of the benefits of creating AI applications with Dify is that you can launch a user-friendly Web application in just a few minutes, based on your Prompt orchestration. + +* If you are using the self-hosted open-source version, the application will run on your server. +* If you are using the cloud version, the application will be hosted on udify.app. + +### Launch WebApp + +In the application overview page, you can find a card for the AI site (WebApp). Simply enable WebApp access to get a shareable link for your users. + +

Share your WebApp

+ +We provide a sleek WebApp interface for both of the following applications: + +* Text Generation (go to preview) +* Conversational (go to preview) + +### Configure your WebApp + +Click the settings button on the WebApp card to configure some options for the AI site. These will be visible to the end users: + +* Icon +* Name +* Application Description +* Interface Language +* Copyright Information +* Privacy Policy Link + +### Embed your WebApp + +Dify supports embedding your AI application into your business website. With this capability, you can create AI customer service and business knowledge Q\&A applications with business data on your official website within minutes. Click the embed button on the WebApp card, copy the embed code, and paste it into the desired location on your website. + +* For iframe tag: + + Copy the iframe code and paste it into the tags (such as `
`, `
`, etc.) on your website used to display the AI application. + +* For script tag: + + Copy the script code and paste it into the `` or `` tags on your website. + +
+ +For example, if you paste the script code into the section of your official website, you will get an AI chatbot on your website: + +
diff --git a/en/application/logs.md b/en/application/logs.md new file mode 100644 index 0000000..1d1cd53 --- /dev/null +++ b/en/application/logs.md @@ -0,0 +1,34 @@ +# Logs & Annotations + +{% hint style="warning" %} +Please ensure that your application complies with local regulations when collecting user data. The common practice is to publish a privacy policy and obtain user consent. +{% endhint %} + +The **Logs** feature is designed to observe and annotate the performance of Dify applications. Dify records logs for all interactions with the application, whether through the WebApp or API. If you are a Prompt Engineer or LLM operator, it will provide you with a visual experience of LLM application operations. + +### Using the Logs Console + +You can find the Logs in the left navigation of the application. This page typically displays: + +* Interaction records between users and AI within the selected timeframe +* The results of user input and AI output, which for conversational applications are usually a series of message flows +* Ratings from users and operators, as well as improvement annotations from operators + +The logs currently do not include interaction records from the Prompt debugging process. + +### Improvement Annotations + +{% hint style="info" %} +These annotations will be used for model fine-tuning in future versions of Dify to improve model accuracy and response style. The current preview version only supports annotations. +{% endhint %} + +\[Image] + +Clicking on a log entry will open the log details panel on the right side of the interface. In this panel, operators can annotate an interaction: + +* Give a thumbs up for well-performing messages +* Give a thumbs down for poorly-performing messages +* Mark improved responses for improvement, which represents the text you expect AI to reply with + +Please note that if multiple administrators in the team annotate the same log entry, the last annotation will overwrite the previous ones. 
+ diff --git a/en/application/prompt-engineering/README.md b/en/application/prompt-engineering/README.md new file mode 100644 index 0000000..da564f6 --- /dev/null +++ b/en/application/prompt-engineering/README.md @@ -0,0 +1,40 @@ +--- +description: >- + Master the use of Dify for orchestrating applications and practicing Prompt + Engineering, and build high-value AI applications with the two built-in + application types. +--- + +# Prompt Engineering + +The core concept of Dify is the declarative definition of AI applications. Everything including Prompts, context, plugins, etc. can be described in a YAML file (which is why it is called Dify). It ultimately presents a single API or out-of-the-box WebApp. + +At the same time, Dify provides an easy-to-use Prompt orchestration interface where developers can visually orchestrate various application features based on Prompts. Doesn't it sound simple? + +For both simple and complex AI applications, good Prompts can effectively improve the quality of model output, reduce error rates, and meet the needs of specific scenarios. Dify currently provides two common application forms: conversational and text generator. This section will guide you through visually orchestrating AI applications. + +### Application Orchestration Steps + +1. Determine application scenarios and functional requirements +2. Design and test Prompts and model parameters +3. Orchestrate Prompts +4. Publish the application +5. Observe and continuously iterate + +### Hands-on Practice + +TODO + +### The Differences between Application Types + +Text generation and conversation applications in Dify have slight differences in prompt orchestration. Conversation applications require incorporating "conversation lifecycle" to meet more complex user scenarios and context management needs. + +Prompt Engineering has developed into a field with tremendous potential, worthy of continuous exploration. 
Please continue reading to learn about the orchestration guidelines for both types of applications. + +### Extended Reading + +1. [Learn Prompting](https://learnprompting.org/zh-Hans/) +2. [ChatGPT Prompt Engineering for Developers](https://www.deeplearning.ai/short-courses/chatgpt-prompt-engineering-for-developers/) +3. [Awesome ChatGPT Prompts](https://github.com/f/awesome-chatgpt-prompts) + + diff --git a/en/application/prompt-engineering/conversation-application.md b/en/application/prompt-engineering/conversation-application.md new file mode 100644 index 0000000..117a5ab --- /dev/null +++ b/en/application/prompt-engineering/conversation-application.md @@ -0,0 +1,89 @@ +# Conversation Application + +Conversation applications use a one-question-one-answer mode to have a continuous conversation with the user. + +### Applicable scenarios + +Conversation applications can be used in fields such as customer service, online education, healthcare, financial services, etc. These applications can help organizations improve work efficiency, reduce labor costs, and provide a better user experience. + +### How to compose + +Conversation applications supports: prompts, variables, context, opening remarks, and suggestions for the next question. + +Here, we use a interviewer application as an example to introduce the way to compose a conversation applications. + +#### Step 1 Create an application + +Click the "Create Application" button on the homepage to create an application. Fill in the application name, and select **"Chat App"** as the application type. + +

Create Application

+ +#### Step 2: Compose the Application + +After the application is successfully created, it will automatically redirect to the application overview page. Click on the left-hand menu: “**Prompt Eng.**” to compose the application. + +
+ + + +**2.1 Fill in Prompts** + +Prompts are used to give a series of instructions and constraints to the AI response. Form variables can be inserted, such as `{{input}}`. The value of variables in the prompts will be replaced with the value filled in by the user. + +The prompt we are filling in here is: + +> I want you to be the interviewer for the \{{jobName\}} position. I will be the candidate, and you will ask me interview questions for the position of \{{jobName\}} developer. I hope you will only answer as the interviewer. Don't write all the questions at once. I wish for you to only interview me. Ask me questions and wait for my answers. Don't write explanations. Ask me one by one like an interviewer and wait for my answer. +> +> When I am ready, you can start asking questions. + +![](<../../.gitbook/assets/image (38).png>) + + + +For a better experience, we will add an opening dialogue: `"Hello, {{name}}. I'm your interviewer, Bob. Are you ready?"` + +To add the opening dialogue, click the "Add Feature" button in the upper left corner, and enable the "Conversation remarkers" feature: + +
+ +And then edit the opening remarks: + +![](<../../.gitbook/assets/image (15).png>) + + + +**2.2 Adding Context** + +If an application wants to generate content based on private contextual conversations, it can use our [dataset](../../advanced/datasets/) feature. Click the "Add" button in the context to add a dataset. + +![](<../../.gitbook/assets/image (9).png>) + + + +**2.3 Debugging** + +We fill in the user input on the right side and debug the input content. + +![](<../../.gitbook/assets/image (11).png>) + +If the results are not satisfactory, you can adjust the prompts and model parameters. Click on the model name in the upper right corner to set the parameters of the model: + +![](<../../.gitbook/assets/image (29).png>) + +We support the GPT-4 model. + + + +**2.4 Publish** + +After debugging the application, click the **"Publish"** button in the upper right corner to save the current settings. + +### **Share Application** + +On the overview page, you can find the sharing address of the application. Click the "Preview" button to preview the shared application. Click the "Share" button to get the sharing link address. Click the "Settings" button to set the shared application information. + +
+ + + +If you want to customize the application that you share, you can Fork our open source [WebApp template](https://github.com/langgenius/webapp-conversation). Based on the template, you can modify the application to meet your specific needs and style requirements. diff --git a/en/application/prompt-engineering/text-generation-application.md b/en/application/prompt-engineering/text-generation-application.md new file mode 100644 index 0000000..277249d --- /dev/null +++ b/en/application/prompt-engineering/text-generation-application.md @@ -0,0 +1,76 @@ +# Text Generator + +Text generation applications are applications that can automatically generate high-quality text based on prompts provided by users. They can generate various types of text, such as article summaries, translations, etc. + +### **Applicable scenarios** + +Text generation applications are suitable for scenarios that require a large amount of text creation, such as news media, advertising, SEO, marketing, etc. They can provide efficient and fast text generation services for these industries, reduce labor costs, and improve production efficiency. + +### **How to c**ompose + +Text generation applications supports: prefix prompt words, variables, context, and generating more similar content. + +Here, we use a translation application as an example to introduce the way to compose a text generation applications. + +#### **Step 1: Create the application** + +Click the "Create Application" button on the homepage to create an application. Fill in the application name, and select "Text Generator" as the application type. + +

Create Application

+ +#### Step 2: Compose the Application + +After the application is successfully created, it will automatically redirect to the application overview page. Click on the left-hand menu: “**Prompt Eng.**” to compose the application. + +
+ +**2.1 Fill in Prefix Prompts** + +Prompts are used to give a series of instructions and constraints to the AI response. Form variables can be inserted, such as `{{input}}`. The value of variables in the prompts will be replaced with the value filled in by the user. + +The prompt we are filling in here is: `Translate the content to: {{language}}. The content is as follows:` + +![](<../../.gitbook/assets/image (7).png>) + + + +**2.2 Adding Context** + +If the application wants to generate content based on private contextual conversations, our [dataset](../../advanced/datasets/) feature can be used. Click the "Add" button in the context to add a dataset. + +![](<../../.gitbook/assets/image (12).png>) + + + +**2.3 Adding Future: Generate more like this** + +Generating more like this allows you to generate multiple texts at once, which you can edit and continue generating from. Click on the "Add Future" button in the upper left corner to enable this feature. + +
+ +**2.4 Debugging** + +We debug on the right side by entering variables and querying content. Click the "Run" button to view the results of the operation. + +![](<../../.gitbook/assets/image (17).png>) + +If the results are not satisfactory, you can adjust the prompts and model parameters. Click on the model name in the upper right corner to set the parameters of the model: + +![](<../../.gitbook/assets/image (36).png>) + + + +**2.5 Publish** + +After debugging the application, click the **"Publish"** button in the upper right corner to save the current settings. + +### **Share Application** + +You can find the sharing address of the application on the overview page. Click the "Preview" button to preview the shared application. Click the "Share" button to obtain the sharing link address. Click the "Settings" button to set the information of the shared application. + +
+ +If you want to customize the application shared outside, you can Fork our open source [WebApp template](https://github.com/langgenius/webapp-text-generator). Based on the template, you can modify the application to meet your specific situation and style requirements. + + + diff --git a/en/community/data-security.md b/en/community/data-security.md new file mode 100644 index 0000000..d76af23 --- /dev/null +++ b/en/community/data-security.md @@ -0,0 +1,13 @@ +# Data Security + +Thank you for your interest in the Dify product. Dify takes your data security very seriously. Please refer to our [\[Privacy Policy\]](https://docs.dify.ai/user-agreement/privacy-policy). + +What can be disclosed is that Dify's cloud service is located on US Azure, and only a very small number of authorized personnel can access user data after approval. In addition, our code is open source on GitHub. If you have concerns about the security of cloud services, you can use the self-deployed version. + +As our product is still in its early stages, there may be some areas where we are not yet perfect, but we do plan to obtain SOC2 and ISO27001 certifications. + +If you have any questions regarding commercialization, please contact business@dify.ai. + +In the self-deployed version of Dify, there is only one instance where the Dify server is called - to check for updates via the API functionality. This must be triggered by an administrator in the backend. There are no other remote server technologies used, so you can use it safely. + +If you still have concerns, you can protect your data through measures such as setting up firewalls. diff --git a/en/community/open-source.md b/en/community/open-source.md new file mode 100644 index 0000000..828927c --- /dev/null +++ b/en/community/open-source.md @@ -0,0 +1,33 @@ +# Open-Source License + +## Dify Open Source License + +The Dify project is licensed under the Apache License 2.0, with the following additional conditions: + +1. 
Dify is permitted to be used for commercialization, such as using Dify as a "backend-as-a-service" for your other applications, or delivering it to enterprises as an application development platform. However, when the following conditions are met, you must contact the producer to obtain a commercial license: + +* Multi-tenant SaaS service: Unless explicitly authorized by Dify in writing, you may not use the Dify.AI source code to operate a multi-tenant SaaS service that is similar to the Dify.AI service edition. +* LOGO and copyright information: In the process of using Dify, you may not remove or modify the LOGO or copyright information in the Dify console. + +Please contact business@dify.ai by email to inquire about licensing matters. + +2. As a contributor, you should agree that your contributed code: + +* The producer can adjust the open-source agreement to be more strict or relaxed. +* Can be used for commercial purposes, such as Dify's cloud business. + +Apart from this, all other rights and restrictions follow the Apache License 2.0. If you need more detailed information, you can refer to the full version of Apache License 2.0. + +The interactive design of this product is protected by appearance patent. + +© 2023 LangGenius, Inc. + +*** + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +``` +http://www.apache.org/licenses/LICENSE-2.0 +``` + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
diff --git a/en/community/support.md b/en/community/support.md new file mode 100644 index 0000000..d4d6c07 --- /dev/null +++ b/en/community/support.md @@ -0,0 +1,19 @@ +# Support + +If you still have questions or suggestions about using the product while reading this documentation, please try the following ways to seek support. Our team and community will do their best to help you. + +### Community Support + +{% hint style="info" %} +Please do not share your Dify account information or other sensitive information with the community. Our support staff will not ask for your account information. +{% endhint %} + +* Submit an Issue on [GitHub](https://github.com/langgenius/dify) +* Join the [Discord community](https://discord.com/invite/AhzKf7dNgk) +* Email [support@dify.ai](mailto:support@dify.ai) + +### Contact Us + +For matters other than product support. + +* Email [hello@dify.ai](mailto:hello@dify.ai) diff --git a/en/explore/chat.md b/en/explore/chat.md new file mode 100644 index 0000000..c4c0f47 --- /dev/null +++ b/en/explore/chat.md @@ -0,0 +1,60 @@ +# Chat + +Chat in explore is a conversational application used to explore the boundaries of Dify's capabilities. + +When we talk to large natural language models, we often encounter situations where the answers are outdated or invalid. This is due to the old training data of the large model and the lack of networking capabilities. Based on the large model, Chat uses agents to capabilities and some tools endow the large model with the ability of online real-time query. + +
+ +Chat supports the use of plugins and datasets. + +### Use plugins + +LLM(Large language model)cannot be networked and invoke external tools. But this cannot meet the actual usage scenarios, such as: + +* When we want to know the weather today, we need to be connected to the Internet. +* When we want to summarize the content of a web page, we need to use an external tool: read the content of the web page. + +The above problem can be solved by using the agent mode: when the LLM cannot answer the user's question, it will try to use the existing plugins to answer the question. + +{% hint style="info" %} +In Dify, we use different proxy strategies for different models. The proxy strategy used by OpenAI's model is **GPT function call**. Another model used is **ReACT**. The current test experience is that the effect of **GPT function call** is better. To know more, you can read the link below: + +* [Function calling and other API updates](https://openai.com/blog/function-calling-and-other-api-updates) +* [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629) +{% endhint %} + +Currently we support the following plugins: + +* Google Search. The plugin searches Google for answers. +* Web Reader. The plugin reads the content of linked web pages. +* Wikipedia. The plugin searches Wikipedia for answers. + +We can choose the plugins needed for this conversation before the conversation starts. + +
+ +If you use the Google search plugin, you need to configure the SerpAPI key. + +
+ +Configured entry: + +
+ +### Use datasets + +Chat supports datasets. After selecting the datasets, the questions asked by the user are related to the content of the data set, and the model will find the answer from the data set. + +We can select the datasets needed for this conversation before the conversation starts. + +
+ + + +### The process of thinking + +The thinking process refers to the process of the model using plugins and datasets. We can see the thought process in each answer. + +
+ diff --git a/en/getting-started/cloud.md b/en/getting-started/cloud.md new file mode 100644 index 0000000..e78a374 --- /dev/null +++ b/en/getting-started/cloud.md @@ -0,0 +1,13 @@ +# Cloud + +{% hint style="info" %} +Note: Dify is currently in the Beta testing phase. If there are inconsistencies between the documentation and the product, please refer to the actual product experience. +{% endhint %} + +Dify offers a [cloud service](http://cloud.dify.ai) for everyone, so you can use the full functionality of LangGenius without deploying it yourself. To use the cloud version of LangGenius, you need to have a GitHub or Google account. + +1. Log in to [Dify Cloud](https://cloud.dify.ai) and create a new Workspace or join an existing one +2. Configure your model provider or use our hosted model provider +3. You can [create an application](../application/creating-an-application.md) now! + +Currently, we don't have a pricing plan. If you like this LLMOps product, please introduce it to your friends😄. diff --git a/en/getting-started/faq/README.md b/en/getting-started/faq/README.md new file mode 100644 index 0000000..a39c1bc --- /dev/null +++ b/en/getting-started/faq/README.md @@ -0,0 +1,2 @@ +# FAQ + diff --git a/en/getting-started/faq/api-use-faq.md b/en/getting-started/faq/api-use-faq.md new file mode 100644 index 0000000..1709e5d --- /dev/null +++ b/en/getting-started/faq/api-use-faq.md @@ -0,0 +1,11 @@ +# API-use-FAQ + +## 1. What is a Bearer Token? + +Bearer authentication (also called token authentication) is an HTTP authentication scheme that involves security tokens called bearer tokens. The name “Bearer authentication” can be understood as “give access to the bearer of this token.” The bearer token is a cryptic string, usually generated by the server in response to a login request. 
The client must send this token in the Authorization header when making requests to protected resources: + +``` +Authorization: Bearer +``` + +The Bearer authentication scheme was originally created as part of OAuth 2.0 in RFC 6750, but is sometimes also used on its own. Similarly to Basic authentication, Bearer authentication should only be used over HTTPS (SSL). diff --git a/en/getting-started/faq/install-faq.md b/en/getting-started/faq/install-faq.md new file mode 100644 index 0000000..632b889 --- /dev/null +++ b/en/getting-started/faq/install-faq.md @@ -0,0 +1,125 @@ +# Install FAQ + +### 1. How to reset the password if the local deployment initialization fails with an incorrect password? + +If deployed using docker compose, you can execute the following command to reset the password: +`docker exec -it docker-api-1 flask reset-password` +Enter the account email and twice new passwords, and it will be reset. + +### 2. How to resolve File not found error in the log when deploying locally? + +``` +ERROR:root:Unknown Error in completion +Traceback (most recent call last): + File "/www/wwwroot/dify/dify/api/libs/rsa.py", line 45, in decrypt + private_key = storage.load(filepath) + File "/www/wwwroot/dify/dify/api/extensions/ext_storage.py", line 65, in load + raise FileNotFoundError("File not found") +FileNotFoundError: File not found +``` + +This error may be caused by switching deployment methods, or deleting the `api/storage/privkeys` file, which is used to encrypt large model keys and can not be reversed if lost. You can reset the encryption public and private keys with the following command: + +* Docker compose deployment + +``` +docker exec -it docker-api-1 flask reset-encrypt-key-pair +``` + +* Source code startup + +Enter the api directory + +``` +flask reset-encrypt-key-pair +``` + +Follow the prompts to reset. + +### 3. Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? 
+ +This may be due to a domain name or website switch, causing cross-domain issues between the front end and the server side. Cross-domain and identity involve two configuration items: + +**CORS cross-domain configuration** + +`CONSOLE_CORS_ALLOW_ORIGINS` Console CORS cross-domain policy, defaults to `*`, which allows access from all domain names. +`WEB_API_CORS_ALLOW_ORIGINS` WebApp CORS cross-domain policy, defaults to `*`, which allows access from all domain names. + +**Cookie policy configuration** + +The cookie policy is divided into three configurations: `HttpOnly`, `SameSite` and `Secure`. + +`HttpOnly`: Defaults to true and normally does not need to be modified. It is used to prevent XSS attacks: JavaScript cannot read the content of the cookie, which is only carried on HTTP requests. + +`SameSite`: Divided into three levels: Strict, Lax and None. Because Dify needs to be able to read identity information from cookies during authorization callbacks from external domains such as GitHub and Google, it can only choose between Lax and None, of which None allows fully cross-domain access. + +`Secure`: This parameter restricts whether the server interface must be served over HTTPS in order for the cookie to be saved locally. It must be true in cross-domain scenarios (except for localhost / 127.0.0.1 on different ports), otherwise the browser will refuse to store the cookie. + +**Recommended Configuration** + +According to the configuration description above, we recommend the following configurations for these three scenarios: +1. Local debug (default policy) +Development-mode same-domain policy. Supports both HTTP and HTTPS protocols, but you need to ensure that the front-end page and the API are under the same domain. + +``` +WEB_API_CORS_ALLOW_ORIGINS: '' +CONSOLE_CORS_ALLOW_ORIGINS: '' +COOKIE_HTTPONLY: 'true' +COOKIE_SAMESITE: 'Lax' +COOKIE_SECURE: 'false' +``` + +2. Cross-Domain Policy (do not use in production) +Cross-domain between server and web client; the server must be HTTPS. 
Since SameSite=None must be paired with Secure=true, the server must use the `HTTPS` protocol for cross-domain access to work. This can be used when the server is deployed remotely with `HTTPS` support, or when the server and the front-end project are both started locally (localhost on different ports — tested and working, although the browser shows a warning). + +``` +WEB_API_CORS_ALLOW_ORIGINS: 'https://your-domain-for-web-app' +CONSOLE_CORS_ALLOW_ORIGINS: 'https://your-domain-for-console' +COOKIE_HTTPONLY: 'true' +COOKIE_SAMESITE: 'None' +COOKIE_SECURE: 'true' +``` + +3. Production Policy +Strict mode. Due to the need to support callbacks and cookies for some third-party integrations, it is not possible to use the strictest Strict policy, so it is necessary to strictly limit the CORS domain names and set the cookie policy to SameSite=Lax, Secure=true. + +``` +WEB_API_CORS_ALLOW_ORIGINS: 'https://your-domain-for-web-app' +CONSOLE_CORS_ALLOW_ORIGINS: 'https://your-domain-for-console' +COOKIE_HTTPONLY: 'true' +COOKIE_SAMESITE: 'Lax' +COOKIE_SECURE: 'true' +``` + +**Unavailable scenarios** +When the front end and back end are cross-domain and the server side uses the HTTP protocol, no cookie policy can support this scenario. Please switch the back end to the HTTPS protocol or serve both under the same domain. + +### 4. After starting, the page keeps loading and the requests show a CORS error? + +This may be because the domain name/URL has been switched, resulting in cross-domain requests between the front end and the back end. Please change all the following configuration items in `docker-compose.yml` to the new domain name: +`CONSOLE_API_URL:` The backend URL of the console API. +`CONSOLE_WEB_URL:` The front-end URL of the console web. +`SERVICE_API_URL:` The Service API URL. +`APP_API_URL:` The WebApp API backend URL. +`APP_WEB_URL:` The WebApp URL. + +For more information, please check out: [Environments](../install-self-hosted/environments.md) + +### 5. How to upgrade the version after deployment? 
+ +If you start up through images, please pull the latest images to complete the upgrade. If you start up through source code, please pull the latest code and then start up to complete the upgrade. + +### 6. How to configure the environment variables when using Notion import + +**Q: What is the Notion Integration configuration address?** + +A: [https://www.notion.so/my-integrations](https://www.notion.so/my-integrations) + +**Q: Which environment variables need to be configured?** + +A: Please set the configuration below when doing a self-hosted deployment: + +1. **`NOTION_INTEGRATION_TYPE`** : The value should be configured as (**public/internal**). Since the redirect address of Notion's OAuth only supports HTTPS, if it is deployed locally, please use Notion's internal integration +2. **`NOTION_CLIENT_SECRET`** : Notion OAuth client secret (used for the public integration type) +3. **`NOTION_CLIENT_ID`** : OAuth client ID (used for the public integration type) +4. **`NOTION_INTERNAL_SECRET`** : Notion Internal Integration Secret. If the value of `NOTION_INTEGRATION_TYPE` is **internal**, you need to configure this variable. diff --git a/en/getting-started/faq/llms-use-faq.md b/en/getting-started/faq/llms-use-faq.md new file mode 100644 index 0000000..8a57802 --- /dev/null +++ b/en/getting-started/faq/llms-use-faq.md @@ -0,0 +1,33 @@ +# LLMs-use-FAQ + +### 1. How to choose a basic model? + +**gpt-3.5-turbo** +• gpt-3.5-turbo is an upgraded version of the gpt-3 model series. It is more powerful than gpt-3 and can handle more complex tasks. It has significant improvements in understanding long text and cross-document reasoning. gpt-3.5-turbo can generate more coherent and persuasive text. It also has great improvements in summarization, translation and creative writing. **Good at: long text understanding, cross-document reasoning, summarization, translation, creative writing** + +**gpt-4** +• gpt-4 is the latest and most powerful Transformer language model. 
It has a substantially larger number of pre-trained parameters than earlier models, making it state-of-the-art on all language tasks, especially those requiring deep understanding and generation of long, complex responses. GPT-4 can handle all aspects of human language, including understanding abstract concepts and cross-page reasoning. GPT-4 is the first true general language understanding system that can handle almost any natural language processing task in the field of artificial intelligence. **Good at: all NLP tasks, language understanding, long text generation, cross-document reasoning, understanding abstract concepts.** Please refer to: [https://platform.openai.com/docs/models/overview](https://platform.openai.com/docs/models/overview) + +### 2. Why is it recommended to set max_tokens smaller? + +In natural language processing, longer text outputs usually require longer computation time and more computing resources. Therefore, limiting the length of the output text can reduce the computational cost and time to some extent. For example, setting max_tokens=500 means that only the first 500 tokens of the output text are considered, and the part exceeding this length will be discarded. The purpose of doing so is to ensure that the length of the output text does not exceed the acceptable range of the LLM, while making full use of computing resources to improve the efficiency of the model. On the other hand, limiting max_tokens leaves more room for the prompt, because the model's context limit is shared between the prompt and the output: for example, the limit of gpt-3.5-turbo is 4097 tokens, so if you set max_tokens=4000, only 97 tokens are left for the prompt, and an error will be reported if this is exceeded. + +### 3. How to split long text data in the dataset reasonably? + +In some natural language processing applications, text is often split into paragraphs or sentences for better processing and understanding of semantic and structural information in the text. The minimum splitting unit depends on the specific task and technical implementation. 
For example: + +• For text classification tasks, text is usually split into sentences or paragraphs. + +• For machine translation tasks, entire sentences or paragraphs need to be used as splitting units. + +Finally, experiments and evaluations are still needed to determine the most suitable embedding technology and splitting unit. The performance of different technologies and splitting units can be compared on the test set to select the optimal scheme. + +### 4. What distance function did we use when getting dataset segmentation? + +We use [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity). The choice of distance function usually does not matter much. OpenAI embeddings are normalized to length 1, which means: + +• Using the dot product to calculate cosine similarity can be slightly faster. • Cosine similarity and Euclidean distance will lead to the same ranking. + +After the embedding vectors are normalized to length 1, calculating the cosine similarity between two vectors can be simplified to their dot product. Because the normalized vectors have a length of 1, the result of the dot product is equal to the result of the cosine similarity. + +Since the dot product calculation is faster than other similarity metrics (such as Euclidean distance), using normalized vectors for dot product calculation can slightly improve computational efficiency. diff --git a/en/getting-started/install-self-hosted/README.md b/en/getting-started/install-self-hosted/README.md new file mode 100644 index 0000000..5bb5ecc --- /dev/null +++ b/en/getting-started/install-self-hosted/README.md @@ -0,0 +1,77 @@ +# Install (Self-Hosted) + +The Dify Self-Hosted Edition, which is open source on [GitHub](https://github.com/langgenius/dify), can be deployed in one of the following two ways: + +1. [Docker Compose Deployment](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose) +2. 
[Local Source Code Start](https://docs.dify.ai/v/zh-hans/getting-started/install-self-hosted/local-source-code) + + + +### FAQ + +* **The page keeps loading after startup, and I see a CORS error in the request.** + + This may be due to a domain/URL change, causing a cross-origin issue between the frontend and backend. Please update the following configuration items in the docker-compose.yml file with the new domain: + + `CONSOLE_URL`: Console domain, e.g., `http://localhost:8080` + + `API_URL`: Service API domain + + `APP_URL`: Web APP domain +* **After installation, I can't log in. Although the login is successful, all subsequent API calls return a 401 error.** + + This issue may be related to cross-origin problems causing the cookie policy to fail. You can configure it according to the following strategies: + + * Default Strategy + + This strategy is suitable for local debugging and supports both HTTP and HTTPS protocols, but it requires the frontend and API to be on the same domain. + + ``` + WEB_API_CORS_ALLOW_ORIGINS: '*' + CONSOLE_CORS_ALLOW_ORIGINS: '*' + COOKIE_HTTPONLY: 'true' + COOKIE_SAMESITE: 'Lax' + COOKIE_SECURE: 'false' + ``` + * Cross-Origin Strategy (Do not use in production) + + Since SameSite=None must be used with Secure=true, the server must use the HTTPS protocol to enable cross-origin access. This strategy can be used when the server is remote and supports HTTPS, or when running the frontend and backend projects separately locally (localhost but on different ports, it works but may show a warning). + + ``` + WEB_API_CORS_ALLOW_ORIGINS: '*' + CONSOLE_CORS_ALLOW_ORIGINS: '*' + COOKIE_HTTPONLY: 'true' + COOKIE_SAMESITE: 'Lax' + COOKIE_SECURE: 'false' + ``` + * Production Strategy + + Due to the requirement of supporting callback with cookie information for some third-party integrations, the strictest Strict strategy cannot be used. 
Therefore, CORS domains need to be strictly limited, and the cookie policy should be set to SameSite=Lax Secure=true. + + ``` + WEB_API_CORS_ALLOW_ORIGINS: 'https://your-domain-for-web-app' + CONSOLE_CORS_ALLOW_ORIGINS: 'https://your-domain-for-console' + COOKIE_HTTPONLY: 'true' + COOKIE_SAMESITE: 'Lax' + COOKIE_SECURE: 'true' + ``` +* **How to configure and use Azure OpenAI** + + Currently, Azure OpenAI support is not fully available, and you need to create a deployment with the specified names to use it: + + * gpt-35-turbo + * gpt-4 + * text-davinci-003 + * text-embedding-ada-002 + + Please note that all deployment names should not contain periods ("."). + + Looks like: + + ![](<../../.gitbook/assets/image (33).png>) + +### Contributing + +To ensure proper review, all code contributions - including those from contributors with direct commit access - must be submitted via pull requests and approved by the core development team prior to being merged. + +We welcome all pull requests! If you'd like to help, check out the [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) for more information on how to get started. 
diff --git a/en/getting-started/install-self-hosted/docker-compose.md b/en/getting-started/install-self-hosted/docker-compose.md new file mode 100644 index 0000000..81ba845 --- /dev/null +++ b/en/getting-started/install-self-hosted/docker-compose.md @@ -0,0 +1,60 @@ +# Docker Compose Deployment + +## Prerequisites + +| Operating System | Software | Explanation | +| -------------------------- | -------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| macOS 10.14 or later | Docker Desktop | Set the Docker virtual machine (VM) to use a minimum of 2 virtual CPUs (vCPUs) and 8 GB of initial memory. Otherwise, the installation may fail. For more information, please refer to the [Docker Desktop installation guide for Mac](https://docs.docker.com/desktop/mac/install/). | +| Linux platforms |

Docker 19.03 or later
Docker Compose 1.25.1 or later

| Please refer to the [Docker installation guide](https://docs.docker.com/engine/install/) and [the Docker Compose installation guide](https://docs.docker.com/compose/install/) for more information on how to install Docker and Docker Compose, respectively. | +| Windows with WSL 2 enabled | Docker Desktop | We recommend storing the source code and other data that is bound to Linux containers in the Linux file system rather than the Windows file system. For more information, please refer to the [Docker Desktop installation guide for using the WSL 2 backend on Windows.](https://docs.docker.com/desktop/windows/install/#wsl-2-backend) | + +### Clone Dify + +Clone the Dify source code to your local machine: + +```Shell +git clone https://github.com/langgenius/dify.git +``` + +### Start Dify + +Navigate to the docker directory in the Dify source code and execute the following command to start Dify: + +```Shell +cd dify/docker +docker compose up -d +``` + +> If your system has Docker Compose V2 installed instead of V1, use `docker compose` instead of `docker-compose`. Check if this is the case by running `$ docker compose version`. [Read more information here](https://docs.docker.com/compose/#compose-v2-and-the-new-docker-compose-command). + +Deployment Results: + +```Shell +[+] Running 7/7 + ✔ Container docker-web-1 Started 1.0s + ✔ Container docker-redis-1 Started 1.1s + ✔ Container docker-weaviate-1 Started 0.9s + ✔ Container docker-db-1 Started 0.0s + ✔ Container docker-worker-1 Started 0.7s + ✔ Container docker-api-1 Started 0.8s + ✔ Container docker-nginx-1 Started +``` + +Finally, check if all containers are running successfully: + +```Shell +docker compose ps +``` + +This includes 3 business services: api / worker / web, and 4 underlying components: weaviate / db / redis / nginx. 
+ +```Shell +NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS +docker-api-1 langgenius/dify-api:0.3.2 "/entrypoint.sh" api 4 seconds ago Up 2 seconds 80/tcp, 5001/tcp +docker-db-1 postgres:15-alpine "docker-entrypoint.s…" db 4 seconds ago Up 2 seconds 0.0.0.0:5432->5432/tcp +docker-nginx-1 nginx:latest "/docker-entrypoint.…" nginx 4 seconds ago Up 2 seconds 0.0.0.0:80->80/tcp +docker-redis-1 redis:6-alpine "docker-entrypoint.s…" redis 4 seconds ago Up 3 seconds 6379/tcp +docker-weaviate-1 semitechnologies/weaviate:1.18.4 "/bin/weaviate --hos…" weaviate 4 seconds ago Up 3 seconds +docker-web-1 langgenius/dify-web:0.3.2 "/entrypoint.sh" web 4 seconds ago Up 3 seconds 80/tcp, 3000/tcp +docker-worker-1 langgenius/dify-api:0.3.2 "/entrypoint.sh" worker 4 seconds ago Up 2 seconds 80/tcp, 5001/tcp +``` diff --git a/en/getting-started/install-self-hosted/environments.md b/en/getting-started/install-self-hosted/environments.md new file mode 100644 index 0000000..2f79092 --- /dev/null +++ b/en/getting-started/install-self-hosted/environments.md @@ -0,0 +1,385 @@ +# Environments + +### Common Variables + +#### EDITION + +Deployment version. + +* `SELF_HOSTED`: Self-hosted version + * Only supports single team/tenant mode + * Can only use email and password to log in + * No trial hosted OpenAI API-Key feature +* `CLOUD`: Cloud version + * Supports multi-team/tenant mode + * Unable to log in using email and password, only supports GitHub, Google authorization login. + * Has 200 trials hosted OpenAI API-Key feature + +#### CONSOLE_API_URL + +The backend URL of the console API, used to concatenate the authorization callback. If empty, it is the same domain. Example: `https://api.console.dify.ai` + +#### CONSOLE_WEB_URL + +The front-end URL of the console web, used to concatenate some front-end addresses and for CORS configuration use. If empty, it is the same domain. 
Example: `https://console.dify.ai` + +> Starting from version `0.3.8`, `CONSOLE_URL` has been split into `CONSOLE_API_URL` and `CONSOLE_WEB_URL`, but `CONSOLE_URL` is still available. + +#### SERVICE_API_URL + +Service API Url, used to display Service API Base Url to the front-end. If empty, it is the same domain. Example: `https://api.dify.ai` + +> Starting from version `0.3.8`, `API_URL` has been renamed to `SERVICE_API_URL`, but `API_URL` is still available. + +#### APP_API_URL + +WebApp API backend Url, used to declare the back-end URL for the front-end API. If empty, it is the same domain. Example: `https://app.dify.ai` + +#### APP_WEB_URL + +WebApp Url, used to display WebAPP API Base Url to the front-end. If empty, it is the same domain. Example: `https://api.app.dify.ai` + +> Starting from version `0.3.8`, `APP_URL` has been split into `APP_API_URL` and `APP_WEB_URL`, but `APP_URL` is still available. + +### Server + +#### MODE + +Startup mode, only available when starting with docker, not effective when starting from source code. + +* api + + Start API Server. +* worker + + Start asynchronous queue worker. + +#### DEBUG + +Debug mode, default is false. It is recommended to turn on this configuration for local development to prevent some problems caused by monkey patch. + +#### FLASK_DEBUG + +Flask debug mode, it can output trace information at the interface when turned on, which is convenient for debugging. + +#### SECRET_KEY + +A key used to securely sign session cookies and encrypt sensitive information in the database. + +This variable needs to be set when starting for the first time. + +You can use `openssl rand -base64 42` to generate a strong key. + +#### DEPLOY_ENV + +Deployment environment. + +* PRODUCTION (default) + + Production environment. +* TESTING + + Testing environment. There will be a distinct color label on the front-end page, indicating that this environment is a testing environment. 
+ +#### LOG_LEVEL + +Log output level, default is INFO. + +It is recommended to set it to ERROR for production. + +#### MIGRATION_ENABLED + +When set to true, the database migration will be automatically executed when the container starts, only available when starting with docker, not effective when starting from source code. + +You need to manually execute `flask db upgrade` in the api directory when starting from source code. + +#### CHECK_UPDATE_URL + +Whether to enable the version check policy. If set to false, `https://updates.dify.ai` will not be called for version check. + +Since the version interface based on CloudFlare Worker cannot be directly accessed in China at present, setting this variable to empty can shield this interface call. + +#### OPENAI_API_BASE + +Used to change the OpenAI base address, default is [https://api.openai.com/v1](https://api.openai.com/v1). + +When OpenAI cannot be accessed in China, replace it with a domestic mirror address, or when a local model provides OpenAI compatible API, it can be replaced. + +#### Container Startup Related Configuration + +Only effective when starting with docker image or docker-compose. + +* DIFY_BIND_ADDRESS + + API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed. +* DIFY_PORT + + API service binding port number, default 5001. +* SERVER_WORKER_AMOUNT + + The number of API server workers, i.e., the number of gevent workers. Formula: `number of cpu cores x 2 + 1` + + Reference: [https://docs.gunicorn.org/en/stable/design.html#how-many-workers](https://docs.gunicorn.org/en/stable/design.html#how-many-workers) +* SERVER_WORKER_CLASS + + Defaults to gevent. If using windows, it can be switched to sync or solo. +* GUNICORN_TIMEOUT + + Request handling timeout. The default is 200, it is recommended to set it to 360 to support a longer sse connection time. +* CELERY_WORKER_CLASS + + Similar to `SERVER_WORKER_CLASS`. Default is gevent. 
If using windows, it can be switched to sync or solo. +* CELERY_WORKER_AMOUNT + + The number of Celery workers. The default is 1, and can be set as needed. + +#### Database Configuration + +The database uses PostgreSQL. Please use the public schema. + +* DB_USERNAME: username +* DB_PASSWORD: password +* DB_HOST: database host +* DB_PORT: database port number, default is 5432 +* DB_DATABASE: database name +* SQLALCHEMY_POOL_SIZE: The size of the database connection pool. The default is 30 connections, which can be appropriately increased. +* SQLALCHEMY_POOL_RECYCLE: Database connection pool recycling time, the default is 3600 seconds. +* SQLALCHEMY_ECHO: Whether to print SQL, default is false. + +#### Redis Configuration + +This Redis configuration is used for caching and for pub/sub during conversation. + +* REDIS_HOST: Redis host +* REDIS_PORT: Redis port, default is 6379 +* REDIS_DB: Redis Database, default is 0. Please use a different Database from Session Redis and Celery Broker. +* REDIS_USERNAME: Redis username, default is empty +* REDIS_PASSWORD: Redis password, default is empty. It is strongly recommended to set a password. +* REDIS_USE_SSL: Whether to use SSL protocol for connection, default is false + +#### Session Configuration + +Only used by the API service for interface identity verification. + +* SESSION_TYPE: + + Session component type + + * redis (default) + + If you choose this, you need to set the environment variables starting with SESSION_REDIS_ below. + * sqlalchemy + + If you choose this, the current database connection will be used and the sessions table will be used to read and write session records. +* SESSION_REDIS_HOST: Redis host +* SESSION_REDIS_PORT: Redis port, default is 6379 +* SESSION_REDIS_DB: Redis Database, default is 0. Please use a different Database from Redis and Celery Broker. +* SESSION_REDIS_USERNAME: Redis username, default is empty +* SESSION_REDIS_PASSWORD: Redis password, default is empty. 
It is strongly recommended to set a password. +* SESSION_REDIS_USE_SSL: Whether to use SSL protocol for connection, default is false + +#### Celery Configuration + +* CELERY_BROKER_URL + + Format as follows: + + ``` + redis://:@:/ + ``` + + Example: `redis://:difyai123456@redis:6379/1` +* BROKER_USE_SSL + + If set to true, use SSL protocol for connection, default is false + +#### CORS Configuration + +Used to set the front-end cross-domain access policy. + +* CONSOLE_CORS_ALLOW_ORIGINS + + Console CORS cross-domain policy, default is `*`, that is, all domains can access. +* WEB_API_CORS_ALLOW_ORIGINS + + WebAPP CORS cross-domain policy, default is `*`, that is, all domains can access. + +For detailed configuration, please refer to: [Cross-domain/identity related guide](https://avytux375gg.feishu.cn/wiki/HyX3wdF1YiejX3k3U2CcTcmQnjg) + +#### Cookie Policy Configuration + +Used to set the browser policy for session cookies used for identity verification. + +* COOKIE_HTTPONLY + + Cookie HttpOnly configuration, default is true. +* COOKIE_SAMESITE + + Cookie SameSite configuration, default is Lax. +* COOKIE_SECURE + + Cookie Secure configuration, default is false. + +For detailed configuration, please refer to: [Cross-domain/identity related guide](https://avytux375gg.feishu.cn/wiki/HyX3wdF1YiejX3k3U2CcTcmQnjg) + +#### File Storage Configuration + +Used to store uploaded data set files, team/tenant encryption keys, and other files. + +* STORAGE_TYPE + + Type of storage facility + + * local (default) + + Local file storage, if this option is selected, the following `STORAGE_LOCAL_PATH` configuration needs to be set. + * s3 + + S3 object storage, if this option is selected, the following S3_ prefixed configurations need to be set. +* STORAGE_LOCAL_PATH + + Default is storage, that is, it is stored in the storage directory of the current directory. 
+ + If you are deploying with docker or docker-compose, be sure to mount the `/app/api/storage` directory in both containers to the same local directory, otherwise, you may encounter file not found errors. +* S3_ENDPOINT: S3 endpoint address +* S3_BUCKET_NAME: S3 bucket name +* S3_ACCESS_KEY: S3 Access Key +* S3_SECRET_KEY: S3 Secret Key +* S3_REGION: S3 region information, such as: us-east-1 + +#### Vector Database Configuration + +* VECTOR_STORE + + The available enum types include: `weaviate`, `qdrant`, `pinecone`, `milvus` (the last two are not yet available) + + Both `milvus` and `zilliz` use the same configuration, both being `milvus`. +* WEAVIATE_ENDPOINT + + Weaviate endpoint address, such as: `http://weaviate:8080`. +* WEAVIATE_API_KEY + + The api-key credential used to connect to Weaviate. +* WEAVIATE_BATCH_SIZE + + The number of index Objects created in batches in Weaviate, default is 100. + + Refer to this document: [https://weaviate.io/developers/weaviate/manage-data/import#how-to-set-batch-parameters](https://weaviate.io/developers/weaviate/manage-data/import#how-to-set-batch-parameters) +* WEAVIATE_GRPC_ENABLED + + Whether to use the gRPC method to interact with Weaviate, performance will greatly increase when enabled, may not be usable locally, default is true. +* QDRANT_URL + + Qdrant endpoint address, such as: `https://your-qdrant-cluster-url.qdrant.tech/` +* QDRANT_API_KEY + + The api-key credential used to connect to Qdrant. +* PINECONE_API_KEY + + The api-key credential used to connect to Pinecone. +* PINECONE_ENVIRONMENT + + The environment where Pinecone is located, such as: `us-east4-gcp` +* MILVUS_HOST + + Milvus host configuration. +* MILVUS_PORT + + Milvus port configuration. +* MILVUS_USER + + Milvus user configuration, default is empty. +* MILVUS_PASSWORD + + Milvus password configuration, default is empty. +* MILVUS_USE_SECURE + + Whether Milvus uses SSL connection, default is false. 
+ +#### Dataset Configuration + +* UPLOAD_FILE_SIZE_LIMIT: + + Upload file size limit, default 15M. +* UPLOAD_FILE_BATCH_LIMIT: + + Number of files that can be uploaded in batch, default 5. + + +#### Sentry Configuration + +Used for application monitoring and error log tracking. + +* SENTRY_DSN + + Sentry DSN address, default is empty, when empty, all monitoring information is not reported to Sentry. +* SENTRY_TRACES_SAMPLE_RATE + + The reporting ratio of Sentry events, if it is 0.01, it is 1%. +* SENTRY_PROFILES_SAMPLE_RATE + + The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. + +#### Notion Integration Configuration + +Notion integration configuration, variables can be obtained by applying for Notion integration: [https://www.notion.so/my-integrations](https://www.notion.so/my-integrations) + +* NOTION_CLIENT_ID +* NOTION_CLIENT_SECRET + +#### Mail related configuration + +* MAIL_TYPE + + The type of mail provider, currently only supports: resend (https://resend.com). If left empty, no mail will be sent. +* MAIL_DEFAULT_SEND_FROM + + The sender's email name, such as: no-reply [no-reply@dify.ai](mailto:no-reply@dify.ai), not mandatory. +* RESEND_API_KEY + + API-Key for the Resend email provider, can be obtained from API-Key. + +#### Third-Party Authorization Settings + +Only available for cloud version. + +* GITHUB_CLIENT_ID: GitHub authorization login Client ID +* GITHUB_CLIENT_SECRET: GitHub authorization login Client Secret +* GOOGLE_CLIENT_ID: Google authorization login Client ID +* GOOGLE_CLIENT_SECRET: Google authorization login Client Secret + +#### Platform Hosting Model Related Configuration + +Only available for cloud version, used for model hosting configuration. + +* HOSTED_OPENAI_ENABLED: Enable OpenAI hosted service, default False +* HOSTED_OPENAI_API_KEY: OpenAI hosted service API key +* HOSTED_OPENAI_API_BASE: OpenAI hosted service API base URL, default is empty, i.e. 
`https://api.openai.com/v1` +* HOSTED_OPENAI_API_ORGANIZATION: OpenAI hosted service organization ID, default is empty +* HOSTED_OPENAI_QUOTA_LIMIT: OpenAI hosted service default trial quota (unit: call count), default 200 calls +* HOSTED_OPENAI_PAID_ENABLED: Enable OpenAI hosted paid service, default False +* HOSTED_OPENAI_PAID_STRIPE_PRICE_ID: OpenAI hosted paid service Stripe price ID +* HOSTED_OPENAI_PAID_INCREASE_QUOTA: Increase quota amount after payment for OpenAI hosted paid service +* HOSTED_AZURE_OPENAI_ENABLED: Enable Azure OpenAI hosted service, default False +* HOSTED_AZURE_OPENAI_API_KEY: Azure OpenAI hosted service API key +* HOSTED_AZURE_OPENAI_API_BASE: Azure OpenAI hosted service API base URL +* HOSTED_AZURE_OPENAI_QUOTA_LIMIT: Azure OpenAI hosted service default trial quota (unit: call count) +* HOSTED_ANTHROPIC_ENABLED: Enable Anthropic hosted service, default False +* HOSTED_ANTHROPIC_API_BASE: Anthropic hosted service API base URL, default is empty +* HOSTED_ANTHROPIC_API_KEY: Anthropic hosted service API key +* HOSTED_ANTHROPIC_QUOTA_LIMIT: Anthropic hosted service default trial quota (unit: tokens), default 600,000 tokens +* HOSTED_ANTHROPIC_PAID_ENABLED: Enable Anthropic hosted paid service, default False +* HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID: Anthropic hosted paid service Stripe price ID +* HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA: Increase quota amount for Anthropic hosted paid service +* HOSTED_ANTHROPIC_PAID_MIN_QUANTITY: Minimum purchase quantity for Anthropic hosted paid service +* HOSTED_ANTHROPIC_PAID_MAX_QUANTITY: Maximum purchase quantity for Anthropic hosted paid service +* STRIPE_API_KEY: Stripe's API key +* STRIPE_WEBHOOK_SECRET: Stripe's Webhook secret + +*** + +### Web Frontend + +#### SENTRY_DSN + +Sentry DSN address, default is empty, when empty, all monitoring information is not reported to Sentry. 
diff --git a/en/getting-started/install-self-hosted/local-source-code.md b/en/getting-started/install-self-hosted/local-source-code.md new file mode 100644 index 0000000..9168d97 --- /dev/null +++ b/en/getting-started/install-self-hosted/local-source-code.md @@ -0,0 +1,223 @@ +# Local Source Code Start + +## Prerequisites + +| Operating System | Software | Explanation | +| -------------------------- | -------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| macOS 10.14 or later | Docker Desktop | Set the Docker virtual machine (VM) to use a minimum of 2 virtual CPUs (vCPUs) and 8 GB of initial memory. Otherwise, the installation may fail. For more information, please refer to the [Docker Desktop installation guide for Mac](https://docs.docker.com/desktop/mac/install/). | +| Linux platforms |

Docker 19.03 or later
Docker Compose 1.25.1 or later

| Please refer to the [Docker installation guide](https://docs.docker.com/engine/install/) and [the Docker Compose installation guide](https://docs.docker.com/compose/install/) for more information on how to install Docker and Docker Compose, respectively. | +| Windows with WSL 2 enabled |

Docker Desktop

| We recommend storing the source code and other data that is bound to Linux containers in the Linux file system rather than the Windows file system. For more information, please refer to the [Docker Desktop installation guide for using the WSL 2 backend on Windows.](https://docs.docker.com/desktop/windows/install/#wsl-2-backend) | + +### Clone Dify + +```Bash +git clone https://github.com/langgenius/dify.git +``` + +Before enabling business services, we need to first deploy PostgresSQL / Redis / Weaviate (if not locally available). We can start them with the following commands: + +```Bash +cd docker +docker compose -f docker-compose.middleware.yaml up -d +``` + +*** + +### Server Deployment + +* API Interface Service +* Worker Asynchronous Queue Consumption Service + +#### Installation of the basic environment: + +Server startup requires Python 3.10.x. It is recommended to use [Anaconda](https://docs.anaconda.com/free/anaconda/install/) for quick installation of the Python environment, which already includes the pip package management tool. + +To create a Python 3.10 environment named "dify," you can use the following command: + +```Bash +conda create --name dify python=3.10 +``` + +To switch to the "dify" Python environment, use the following command: + +``` +conda activate dify +``` + +#### Follow these steps : + +1. Navigate to the "api" directory: + +
cd api
+    
+2. Copy the environment variable configuration file: + + ``` + cp .env.example .env + ``` +3. Generate a random secret key and replace the value of SECRET_KEY in the .env file: + + ``` + openssl rand -base64 42 + sed -i 's/SECRET_KEY=.*/SECRET_KEY=/' .env + ``` +4. Install the required dependencies: + +
pip install -r requirements.txt
+    
+5. Perform the database migration + + Perform database migration to the latest version: + +
flask db upgrade
+    
+ + +6. Start the API server: + + ``` + flask run --host 0.0.0.0 --port=5001 --debug + ``` + + output: + + ``` + * Debug mode: on + INFO:werkzeug:WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. + * Running on all addresses (0.0.0.0) + * Running on http://127.0.0.1:5001 + INFO:werkzeug:Press CTRL+C to quit + INFO:werkzeug: * Restarting with stat + WARNING:werkzeug: * Debugger is active! + INFO:werkzeug: * Debugger PIN: 695-801-919 + ``` +7. start the Worker service + + To consume asynchronous tasks from the queue, such as dataset file import and dataset document updates, follow these steps to start the Worker service on Linux or macOS: + + `celery -A app.celery worker -P gevent -c 1 -Q dataset,generation,mail --loglevel INFO` + + If you are using a Windows system to start the Worker service, please use the following command instead: + + ``` + celery -A app.celery worker -P solo --without-gossip --without-mingle -Q dataset,generation,mail --loglevel INFO + ``` + + output: + + ``` + -------------- celery@TAKATOST.lan v5.2.7 (dawn-chorus) + --- ***** ----- + -- ******* ---- macOS-10.16-x86_64-i386-64bit 2023-07-31 12:58:08 + - *** --- * --- + - ** ---------- [config] + - ** ---------- .> app: app:0x7fb568572a10 + - ** ---------- .> transport: redis://:**@localhost:6379/1 + - ** ---------- .> results: postgresql://postgres:**@localhost:5432/dify + - *** --- * --- .> concurrency: 1 (gevent) + -- ******* ---- .> task events: OFF (enable -E to monitor tasks in this worker) + --- ***** ----- + -------------- [queues] + .> dataset exchange=dataset(direct) key=dataset + .> generation exchange=generation(direct) key=generation + .> mail exchange=mail(direct) key=mail + + [tasks] + . tasks.add_document_to_index_task.add_document_to_index_task + . tasks.clean_dataset_task.clean_dataset_task + . tasks.clean_document_task.clean_document_task + . tasks.clean_notion_document_task.clean_notion_document_task + . 
tasks.create_segment_to_index_task.create_segment_to_index_task + . tasks.deal_dataset_vector_index_task.deal_dataset_vector_index_task + . tasks.document_indexing_sync_task.document_indexing_sync_task + . tasks.document_indexing_task.document_indexing_task + . tasks.document_indexing_update_task.document_indexing_update_task + . tasks.enable_segment_to_index_task.enable_segment_to_index_task + . tasks.generate_conversation_summary_task.generate_conversation_summary_task + . tasks.mail_invite_member_task.send_invite_member_mail_task + . tasks.remove_document_from_index_task.remove_document_from_index_task + . tasks.remove_segment_from_index_task.remove_segment_from_index_task + . tasks.update_segment_index_task.update_segment_index_task + . tasks.update_segment_keyword_index_task.update_segment_keyword_index_task + + [2023-07-31 12:58:08,831: INFO/MainProcess] Connected to redis://:**@localhost:6379/1 + [2023-07-31 12:58:08,840: INFO/MainProcess] mingle: searching for neighbors + [2023-07-31 12:58:09,873: INFO/MainProcess] mingle: all alone + [2023-07-31 12:58:09,886: INFO/MainProcess] pidbox: Connected to redis://:**@localhost:6379/1. + [2023-07-31 12:58:09,890: INFO/MainProcess] celery@TAKATOST.lan ready. + ``` + +*** + +## Deploy the frontend page + +Start the web frontend client page service + +#### Installation of the basic environment: + +To start the web frontend service, you will need [Node.js v18.x (LTS)](http://nodejs.org/) and [NPM version 8.x.x](https://www.npmjs.com/) or [Yarn](https://yarnpkg.com/). + +* Install NodeJS + NPM + +Please visit [https://nodejs.org/en/download](https://nodejs.org/en/download) and choose the installation package for your respective operating system that is v18.x or higher. It is recommended to download the stable version, which includes NPM by default. + +#### Follow these steps : + +1. Enter the web directory + + ``` + cd web + ``` +2. Install the dependencies. + + ``` + npm install + ``` +3. 
Configure the environment variables. Create a file named .env.local in the current directory and copy the contents from .env.example. Modify the values of these environment variables according to your requirements: + + ``` + # For production release, change this to PRODUCTION + NEXT_PUBLIC_DEPLOY_ENV=DEVELOPMENT + # The deployment edition, SELF_HOSTED or CLOUD + NEXT_PUBLIC_EDITION=SELF_HOSTED + # The base URL of console application, refers to the Console base URL of WEB service if console domain is + # different from api or web app domain. + # example: http://cloud.dify.ai/console/api + NEXT_PUBLIC_API_PREFIX=http://localhost:5001/console/api + # The URL for Web APP, refers to the Web App base URL of WEB service if web app domain is different from + # console or api domain. + # example: http://udify.app/api + NEXT_PUBLIC_PUBLIC_API_PREFIX=http://localhost:5001/api + + # SENTRY + NEXT_PUBLIC_SENTRY_DSN= + NEXT_PUBLIC_SENTRY_ORG= + NEXT_PUBLIC_SENTRY_PROJECT= + ``` +4. Build the code + + ``` + npm run build + ``` +5. Start the web service: + + ``` + npm run start + # or + yarn start + # or + pnpm start + ``` + +After successful startup, the terminal will output the following information: + +``` +ready - started server on 0.0.0.0:3000, url: http://localhost:3000 +warn - You have enabled experimental feature (appDir) in next.config.js. +warn - Experimental features are not covered by semver, and may cause unexpected or broken application behavior. Use at your own risk. +info - Thank you for testing `appDir` please leave your feedback at https://nextjs.link/app-feedback +``` + +### Access Dify + +Finally, access [http://127.0.0.1:3000](http://127.0.0.1:3000/) to use the locally deployed Dify. 
diff --git a/en/getting-started/install-self-hosted/start-the-frontend-docker-container.md b/en/getting-started/install-self-hosted/start-the-frontend-docker-container.md new file mode 100644 index 0000000..9806f98 --- /dev/null +++ b/en/getting-started/install-self-hosted/start-the-frontend-docker-container.md @@ -0,0 +1,24 @@ +# Start the frontend Docker container separately + +When developing the backend separately, you may only need to start the backend service from source code without building and launching the frontend locally. In this case, you can directly start the frontend service by pulling the Docker image and running the container. Here are the specific steps: + +#### Pull the Docker image for the frontend service from DockerHub: + +```Bash +docker run -it -p 3000:3000 -e EDITION=SELF_HOSTED -e CONSOLE_URL=http://127.0.0.1:3000 -e APP_URL=http://127.0.0.1:3000 langgenius/dify-web:latest +``` + +#### Build Docker Image from Source Code + +1. Build the frontend image: + + ``` + cd web && docker build . -t dify-web + ``` +2. Start the frontend image + + ``` + docker run -it -p 3000:3000 -e EDITION=SELF_HOSTED -e CONSOLE_URL=http://127.0.0.1:3000 -e APP_URL=http://127.0.0.1:3000 dify-web + ``` +3. When the console domain and web app domain are different, you can set the CONSOLE_URL and APP_URL separately. +4. To access it locally, you can visit [http://127.0.0.1:3000](http://127.0.0.1:3000/). diff --git a/en/getting-started/what-is-llmops.md b/en/getting-started/what-is-llmops.md new file mode 100644 index 0000000..d4666d1 --- /dev/null +++ b/en/getting-started/what-is-llmops.md @@ -0,0 +1,41 @@ +# What is LLMOps? + +**LLMOps (Large Language Model Operations) is a comprehensive set of practices and processes that cover the development, deployment, maintenance, and optimization of large language models (such as the GPT series). 
The goal of LLMOps is to ensure the efficient, scalable, and secure use of these powerful AI models to build and run real-world applications. It involves aspects such as model training, deployment, monitoring, updating, security, and compliance.** + +The table below illustrates the differences in various stages of AI application development before and after using Dify: + +| Steps | Before | After | Save time | +| ---------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | --------- | +| Developing Frontend & Backend for ApplicationsPrompt Engineering | Integrating and encapsulating LLM capabilities requires a lot of time to develop front-end applications. | Directly use Dify' backend services to develop based on a WebApp scaffold. | -80% | +| Prompt Engineering | Can only be done by calling APIs or Playground. | Debug based on the user's input data. | -25% | +| Data Preparation and Embedding | Writing code to implement long text data processing and embedding. | Upload text or bind data sources to the platform. | -80% | +| Application Logging and Analysis | Writing code to record logs and accessing databases to view them. | The platform provides real-time logging and analysis. | -70% | +| Data Analysis and Fine-Tuning | Technical personnel manage data and create fine-tuning queues. | Non-technical personnel can collaborate and adjust the model visually. | -60% | +| AI Plugin Development and Integration | Writing code to create and integrate AI plugins. | The platform provides visual tools for creating and integrating plugins. | -50% | + + + +Before using an LLMOps platform like Dify, the process of developing applications based on LLMs can be cumbersome and time-consuming. 
Developers need to handle tasks at each stage on their own, which can lead to inefficiencies, difficulties in scaling, and security issues. Here is the development process before using an LLMOps platform: + +1. Data Preparation: Manually collect and preprocess data, which may involve complex data cleaning and annotation work, requiring a significant amount of code. +2. Prompt Engineering: Developers can only write and debug Prompts through API calls or Playgrounds, lacking real-time feedback and visual debugging. +3. Embedding and Context Management: Manually handling the embedding and storage of long contexts, which can be difficult to optimize and scale, requiring a fair amount of programming work and familiarity with model embedding and vector databases. +4. Application Monitoring and Maintenance: Manually collect and analyze performance data, possibly unable to detect and address issues in real-time, and may even lack log records. +5. Model Fine-tuning: Independently manage the fine-tuning data preparation and training process, which can lead to inefficiencies and require more code. +6. System and Operations: Technical personnel involvement or cost required for developing a management backend, increasing development and maintenance costs, and lacking support for collaboration and non-technical users. + +With the introduction of an LLMOps platform like Dify, the process of developing applications based on LLMs becomes more efficient, scalable, and secure. Here are the advantages of developing LLM applications using Dify: + +1. Data Preparation: The platform provides data collection and preprocessing tools, simplifying data cleaning and annotation tasks, and minimizing or even eliminating coding work. +2. Prompt Engineering: WYSIWYG Prompt editing and debugging, allowing real-time optimization and adjustments based on user input data. +3. 
Embedding and Context Management: Automatically handling the embedding, storage, and management of long contexts, improving efficiency and scalability without the need for extensive coding. +4. Application Monitoring and Maintenance: Real-time monitoring of performance data, quickly identifying and addressing issues, ensuring the stable operation of applications, and providing complete log records. +5. Model Fine-tuning: The platform offers one-click fine-tuning functionality based on previously annotated real-use data, improving model performance and reducing coding work. +6. System and Operations: User-friendly interface accessible to non-technical users, supporting collaboration among multiple team members, and reducing development and maintenance costs. Compared to traditional development methods, Dify offers more transparent and easy-to-monitor application management, allowing team members to better understand the application's operation. + + + + Additionally, Dify will provide AI plugin development and integration features, enabling developers to easily create and deploy LLM-based plugins for various applications, further enhancing development efficiency and application value. + +**Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-native applications. With visual orchestration for various application types, Dify offers out-of-the-box, ready-to-use applications that can also serve as Backend-as-a-Service APIs. Unify your development process with one API for plugins and datasets integration, and streamline your operations using a single interface for prompt engineering, visual analytics, and continuous improvement. + diff --git a/en/use-cases/build-an-notion-ai-assistant.md b/en/use-cases/build-an-notion-ai-assistant.md new file mode 100644 index 0000000..4ed2bc1 --- /dev/null +++ b/en/use-cases/build-an-notion-ai-assistant.md @@ -0,0 +1,166 @@ +# How to Build a Notion AI Assistant Based on Your Own Notes? 
+ +### Intro[​](https://wsyfin.com/notion-dify#intro) + +Notion is a powerful tool for managing knowledge. Its flexibility and extensibility make it an excellent personal knowledge library and shared workspace. Many people use it to store their knowledge and work in collaboration with others, facilitating the exchange of ideas and the creation of new knowledge. + +However, this knowledge remains static, as users must search for the information they need and read through it to find the answers they're seeking. This process is neither particularly efficient nor intelligent. + +Have you ever dreamed of having an AI assistant based on your Notion library? This assistant would not only assist you in reviewing your knowledge base, but also engage in communication like a seasoned butler, even answering other people's questions as if you were the master of your personal Notion library. + +### How to Make Your Notion AI Assistant Come True?[​](https://wsyfin.com/notion-dify#how-to-make-your-notion-ai-assistant-come-true) + +Now, you can make this dream come true through [Dify](https://dify.ai/). Dify is an open-source LLMOps (Large Language Models Ops) platform. + +Large Language Models like ChatGPT and Claude have been using their impressive abilities to reshape the world. Their powerful learning aptitude is primarily attributable to robust training data. Luckily, they've evolved to be sufficiently intelligent to learn from the content you provide, thus making the process of ideating from your personal Notion library a reality. + +Without Dify, you might need to acquaint yourself with langchain, an abstraction that streamlines the process of assembling these pieces. + +### How to Use Dify to Build Your Personal Notion AI Assistant?[​](https://wsyfin.com/notion-dify#how-to-use-dify-to-build-your-own-ai-assistant) + +The process to train a Notion AI assistant is relatively straightforward. Just follow these steps: + +1. Login to Dify. +2. Create a new dataset. +3. 
Connect with Notion and your datasets. +4. Start training. +5. Create your own AI application. + +#### 1. Login to dify[​](https://wsyfin.com/notion-dify#1-login-to-dify) + +Click [here](https://dify.ai/) to login to Dify. You can conveniently log in using your GitHub or Google account. + +> If you are using a GitHub account to log in, how about giving this [project](https://github.com/langgenius/dify) a star? It really helps us a lot! + +![login-1](https://pan.wsyfin.com/f/ERGcp/login-1.png) + +#### 2. Create a new dataset[​](https://wsyfin.com/notion-dify#2-create-a-new-datasets)[​](https://wsyfin.com/notion-dify#2-create-a-new-datasets) + +Click the `Datasets` button on the top side bar, followed by the `Create Dataset` button. + +![login-2](https://pan.wsyfin.com/f/G6ziA/login-2.png) + +#### 3. Connect with Notion and Your Datasets[​](https://wsyfin.com/notion-dify#3-connect-with-notion-and-datasets) + +Select "Sync from Notion" and then click the "Connect" button. + +![connect-with-notion-1](https://pan.wsyfin.com/f/J6WsK/connect-with-notion-1.png) + +Afterward, you'll be redirected to the Notion login page. Log in with your Notion account. + +
+ +Check the permissions needed by Dify, and then click the "Select pages" button. + +
+ +Select the pages you want to synchronize with Dify, and press the "Allow access" button. + +
+ +#### 4. Start training[​](https://wsyfin.com/notion-dify#4-start-training) + +Specify the pages you want the AI to study, enabling it to comprehend the content within this section of Notion. Then click the "next" button. + +![train-1](https://pan.wsyfin.com/f/Nkjuj/train-1.png) + +We suggest selecting the "Automatic" and "High Quality" options to train your AI assistant. Then click the "Save & Process" button. + +![train-2](https://pan.wsyfin.com/f/OYoCv/train-2.png) + +Enjoy your coffee while waiting for the training process to complete. + +![train-3](https://pan.wsyfin.com/f/PN9F3/train-3.png) + +#### 5. Create Your AI application[​](https://wsyfin.com/notion-dify#5-create-your-ai-application) + +You must create an AI application and link it with the dataset you've recently created. + +Return to the dashboard, and click the "Create new APP" button. It's recommended to use the Chat App directly. + +![create-app-1](https://pan.wsyfin.com/f/QWRHo/create-app-1.png) + +Select the "Prompt Eng." and link your Notion datasets in the "context". + +![create-app-2](https://pan.wsyfin.com/f/R6DT5/create-app-2.png) + +I recommend adding a 'Pre Prompt' to your AI application. Just like spells are essential to Harry Potter, similarly, certain tools or features can greatly enhance the ability of your AI application. + +For example, if your Notion notes focus on problem-solving in software development, you could write in one of the prompts: + +_I want you to act as an IT Expert in my Notion workspace, using your knowledge of computer science, network infrastructure, Notion notes, and IT security to solve the problems_. + +
+ +It's recommended to initially enable the AI to actively furnish the users with a starter sentence, providing a clue as to what they can ask. Furthermore, activating the 'Speech to Text' feature can allow users to interact with your AI assistant using their voice. + +
+ +Finally, click the "Publish" button on the top right of the page. Now you can click the public URL in the "Overview" section to converse with your personalized AI assistant! + +![create-app-4](https://pan.wsyfin.com/f/W69cD/create-app-4.png) + +### Utilizing API to Integrate With Your Project + +Each AI application built with Dify can be accessed via its API. This method allows developers to tap directly into the robust characteristics of large language models (LLMs) within frontend applications, delivering a true "Backend-as-a-Service" (BaaS) experience. + +With effortless API integration, you can conveniently invoke your Notion AI application without the need for intricate configurations. + +Click the "API Reference" button on the Overview page. You can refer to it as your App's API document. + +![using-api-1](https://pan.wsyfin.com/f/wp0Cy/using-api-1.png) + +#### 1. Generate API Secret Key[​](https://wsyfin.com/notion-dify#1-generate-api-secret-key) + +For security reasons, it's recommended to create a new API secret key to access your AI application. + +![using-api-2](https://pan.wsyfin.com/f/xk2Fx/using-api-2.png) + +#### 2. Retrieve Conversation ID[​](https://wsyfin.com/notion-dify#2-retrieve-conversation-id) + +After chatting with your AI application, you can retrieve the session ID from the "Logs & Ann." pages. + +![using-api-3](https://pan.wsyfin.com/f/yPXHL/using-api-3.png) + +#### 3. Invoke API[​](https://wsyfin.com/notion-dify#3-invoke-api) + +You can run the example request code from the API document to invoke your AI application in the terminal. + +Remember to replace `YOUR SECRET KEY` and `conversation_id` in your code. + +> You can input an empty `conversation_id` the first time, and replace it after you receive a response containing `conversation_id`. 
+ +``` +curl --location --request POST 'https://api.dify.ai/v1/chat-messages' \ +--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "inputs": {}, + "query": "eh", + "response_mode": "streaming", + "conversation_id": "", + "user": "abc-123" +}' +``` + +Send the request in the terminal and you will get a successful response. + +![using-api-4](https://pan.wsyfin.com/f/zpnI4/using-api-4.png) + +If you want to continue this chat, replace the `conversation_id` in the request code with the `conversation_id` you get from the response. + +And you can check all the conversation history on the "Logs & Ann." page. + +![using-api-5](https://pan.wsyfin.com/f/ADQSE/using-api-5.png) + +### Sync with Notion periodically[​](https://wsyfin.com/notion-dify#sync-with-notion-periodically) + +If your Notion pages have been updated, you can sync with Dify periodically to keep your AI assistant up-to-date. Your AI assistant will learn from the new content. + +![create-app-5](https://pan.wsyfin.com/f/XDBfO/create-app-5.png) + +### Summary[​](https://wsyfin.com/notion-dify#summary) + +In this tutorial, we have learned not only how to import your Notion data into Dify, but also how to use the API to integrate it with your project. + +[Dify](https://dify.ai/) is a user-friendly LLMOps platform aimed at empowering more individuals to create sustainable, AI-native applications. With visual orchestration designed for various application types, Dify offers ready-to-use applications that can assist you in utilizing data to craft your distinctive AI assistant. Do not hesitate to contact us if you have any inquiries. 
diff --git a/en/use-cases/create-a-midjoureny-prompt-bot-with-dify.md b/en/use-cases/create-a-midjoureny-prompt-bot-with-dify.md new file mode 100644 index 0000000..243191e --- /dev/null +++ b/en/use-cases/create-a-midjoureny-prompt-bot-with-dify.md @@ -0,0 +1,59 @@ +# Create a Midjourney Prompt Bot Without Code in Just a Few Minutes + +via [@op7418](https://twitter.com/op7418) on Twitter + +I recently tried out a natural language programming tool called Dify, developed by [@goocarlos](https://twitter.com/goocarlos). It allows someone without coding knowledge to create a web application just by writing prompts. It even generates the API for you, making it easy to deploy your application on your preferred platform. + + + +The application I created using Dify took me only 20 minutes, and the results were impressive. Without Dify, it might have taken me much longer to achieve the same outcome. The specific functionality of the application is to generate Midjourney prompts based on short input topics, assisting users in quickly filling in common Midjourney commands. In this tutorial, I will walk you through the process of creating this application to familiarize you with the platform. + +Dify offers two types of applications: conversational applications similar to ChatGPT, which involve multi-turn dialogue, and text generation applications that directly generate text content with the click of a button. Since we want to create a Midjourney prompt bot, we'll choose the text generator. + +You can access Dify here: https://dify.ai/ + +
+ +Once you've created your application, the dashboard page will display some data monitoring and application settings. Click on "Prompt Engineering" on the left, which is the main working page. + +
+ +On this page, the left side is for prompt settings and other functions, while the right side provides real-time previews and usage of your created content. The prefix prompt is prepended to the content that the user inputs each time, and it instructs the GPT model how to process the user's input information. + +
+ +Take a look at my prefix prompt structure: the first part instructs GPT to output a description of a photo in the following structure. The second structure serves as the template for generating the prompt, mainly consisting of elements like 'Color photo of the theme,' 'Intricate patterns,' 'Stark contrasts,' 'Environmental description,' 'Camera model,' 'Lens focal length description related to the input content,' 'Composition description relative to the input content,' and 'The names of four master photographers.' This constitutes the main content of the prompt. In theory, you can now save this to the preview area on the right, input the theme you want to generate, and the corresponding prompt will be generated. + +
+ +You may have noticed the "\{{proportion\}}" and "\{{version\}}" at the end. These are variables used to pass user-selected information. On the right side, users are required to choose image proportions and model versions, and these two variables help carry that information to the end of the prompt. Let's see how to set them up. + +
+ +Our goal is to fill in the user's selected information at the end of the prompt, making it easy for users to copy without having to rewrite or memorize these commands. For this, we use the variable function. + +Variables allow us to dynamically incorporate the user's form-filled or selected content into the prompt. For example, I've created two variables: one represents the image proportion, and the other represents the model version. Click the "Add" button to create the variables. + +
+ +After creation, you'll need to fill in the variable key and field name. The variable key should be in English. The optional setting means the field will be non-mandatory when the user fills it. Next, click "Settings" in the action bar to set the variable content. + +
+ +Variables can be of two types: text variables, where users manually input content, and select options where users select from given choices. Since we want to avoid manual commands, we'll choose the dropdown option and add the required choices. + +
+ +Now, let's use the variables. We need to enclose the variable key within double curly brackets (for example, \{{proportion\}}) and add it to the prefix prompt. Since we want the GPT to output the user-selected content as is, we'll include the phrase "Producing the following English photo description based on user input" in the prompt. + +
+ +However, there's still a chance that GPT might modify our variable content. To address this, we can lower the diversity in the model selection on the right, reducing the temperature and making it less likely to alter our variable content. You can check the tooltips for other parameters' meanings. + +
+ +With these steps, your application is now complete. After testing and ensuring there are no issues with the output, click the "Publish" button in the upper right corner to release your application. You and users can access your application through the publicly available URL. You can also customize the application name, introduction, icon, and other details in the settings. + +
That's how you create a simple AI application using Dify. You can also deploy your application on other platforms or modify its UI using the generated API. Additionally, Dify supports uploading your own data, such as building a customer service bot to assist with product-related queries. This concludes the tutorial, and a special thanks to @goocarlos for creating such a fantastic product. diff --git a/en/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md b/en/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md new file mode 100644 index 0000000..a88aa88 --- /dev/null +++ b/en/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md @@ -0,0 +1,68 @@ +# Create an AI ChatBot with Business Data in Minutes + +AI-powered customer service may be a standard feature for every business website, and it is becoming easier to implement with higher levels of customization. The following content will guide you on how to create an AI-powered customer service for your website in just a few minutes using Dify. + +### Prerequisite + +**Register or Deploy Dify.AI** + +Dify is an open-source product which you can find on [GitHub](https://github.com/langgenius/dify) and deploy to your local or company intranet. Meanwhile, it provides a cloud SaaS version; access [Dify.AI](https://dify.ai/) to register and use it. + +**Apply for an API key from OpenAI and other model providers.** + +Dify provides free message call usage quotas for OpenAI GPT series (200 times) and Anthropic Claude (1000 times) AI models, which require tokens to be consumed. Before you run out, you need to apply for your own API key through the official channel of the model provider. You can enter the key in Dify's "Settings" - "Model Provider". + +### Upload your product documentation or knowledge base. 
+ +If you want to build an AI Chatbot based on the company's existing knowledge base and product documents, then you need to upload as many product-related documents as possible to Dify's dataset. Dify helps you **complete segmentation and cleaning of the data.** The Dify dataset supports two indexing modes: high quality and economical. We recommend using the high quality mode, which consumes tokens but provides higher accuracy. + +1. Create a new dataset +2. Upload your business data (supports batch uploading of multiple texts) +3. Select the cleaning method +4. Click \[Save and Process], and it will take only a few seconds to complete the processing. + +
+ +### Create an AI application and give it instructions + +Create a conversational app on the \[Build App] page. Then start setting up the prompt and its front-end user experience interactions. + +1. Give the AI instruction: Click on the "Pre Prompt" on the left to edit your Prompt, so that it can play the role of customer service and communicate with users. You can specify its tone, style, and limit it to answer or not answer certain questions. +2. Let AI possess your business knowledge: add the target dataset you just uploaded in the \[context]. +3. Set up the opening remarks: click "Add Feature" to turn on the feature. The purpose is to add an opening line for AI applications, so that when the user opens the customer service window, it will greet the user first and increase affinity. +4. Set up the "Next Question Suggestion": turn on this feature to "Add Feature". The purpose is to give users a direction for their next question after they have asked one. +5. Choose a suitable model and adjust the parameters: different models can be selected in the upper right corner of the page. The performance and token price consumed by different models are different. In this example, we use the GPT3.5 model. + +In this case, we assign a role to the AI: + +> Pre prompt:You are Bob, the AI customer service for Dify, specializing in answering questions about Dify's products, team, or LLMOps for users.Please note, refuse to answer when users ask "inappropriate questions", i.e., content beyond the scope of this document. + +> Opening remarks:Hey \{{username\}}, I'm Bob☀️, the first AI member of Dify. You can discuss with me any questions related to Dify products, team, and even LLMOps. + +
+ +### Debug the performance of AI Chatbot and publish. + +After completing the setup, you can send messages to it on the right side of the current page to debug whether its performance meets expectations. Then click "Publish". And then you get an AI chatbot. + +
+ +### Embed AI Chatbot application into your front-end page. + +This step is to embed the prepared AI chatbot into your official website. Click \[Overview] -> \[Embedded], select the script tag method, and copy the script code into the `<head>` or `<body>` tag of your website. If you are not a technical person, you can ask the developer responsible for the official website to paste and update the page. + +
+ +1. Paste the copied code into the target location on your website. + +
+ +1. Update your official website and you can get an AI intelligent customer service with your business data. Try it out to see the effect. + +
+ +Above is an example of how to embed Dify into the official website through the AI chatbot Bob of Dify official website. Of course, you can also use more features provided by Dify to enhance the performance of the chatbot, such as adding some variable settings, so that users can fill in necessary judgment information before interaction, such as name, specific product used and so on. + +Welcome to explore in Dify together! + +
diff --git a/en/user-agreement/privacy-policy.md b/en/user-agreement/privacy-policy.md new file mode 100644 index 0000000..0cd1e18 --- /dev/null +++ b/en/user-agreement/privacy-policy.md @@ -0,0 +1,128 @@ +# Privacy Policy + +Welcome to LangGenius. LangGenius, Inc. (hereinafter referred to as "LangGenius", "Dify.AI", "Dify", "we", "our" or "us") respects your privacy and is committed to protecting your personal information. This privacy policy is intended to explain how we collect, use, protect, and share the information you provide when using LangGenius services. + +Please carefully read this Policy before you use or submit any information through or in connection with the Services. If you do not agree with this Policy, please do not access or use our Services or interact with any other aspect of our business. Unless otherwise required by laws in your residence, by using our Services, you accept our privacy practices described in this Policy. + +### What information we collect about you? + +We collect and store personal information that you directly provide us through our Site, when using our Products, and other ways. + +#### Information you provide to us + +We will collect and store personal information that you provide to us directly through our website when using our products, as well as through other means (such as through user support requests, interacting through social media, participating in surveys or promotions, applying for a job, and interacting on our website and at events). The information we collect includes, for example: + +* Account and profile information. When you register for an account, create or modify your profile, set preferences, sign-up for or make purchases through the Services, we collect information about you which includes without limitation your name, business telephone number and your email address, passwords, and similar security information used for authentication and account access. 
You may also choose to provide us with a display name, profile photo, job title, and other details to your profile information to be displayed in our Services. +* Content you provide through our products: As part of the service, we collect and store the content you post, send, receive, and share through our product. This includes any data you enter in any "free text" box on our product, as well as files and links you upload to the service. Examples of the content we collect and store include: applications you create in Dify.AI, descriptions of application-related commands, links to access applications, links to privacy policies for applications, or any other information you provide. +* Content provided by you through community platforms, instant messaging tools, or our website: We also collect other content that you submit to us for operation of website channels (such as social media or social networking sites). For example, when you provide feedback or participate in any interactive features, surveys, contests, promotions, sweepstakes, events, or activities, you provide content to us through phone, community interaction, IM services, etc. (such as GitHub, Twitter, Discord, WeChat, Slack, etc.) +* Information provided through our support channels: Through our user support, you can choose to submit information about any issues you encounter while using our services. You may contact us through email, third-party IM tools to directly communicate with our support team. You will be asked to provide contact information, a summary of the issue you are facing, and any additional documents, screenshots, or information that may help to resolve the problem. +* Payment and billing information: When you use certain paid services on Dify.AI, we collect your payment and billing information. You may also be required to provide credit card information to third-party secure payment processing service providers (such as Stripe). We do not store your credit card information. 
+ +#### Information we collect automatically when you use the Services + +When you use our services (including browsing our website and taking certain actions within the service), we may collect information about you. + +* Your Use of the Services: When you access any of our services and interact with them, we may track certain information about you, including but not limited to the features you use; the links you click on; the type, size, and filenames of attachments you upload to the services; and how you interact or click on our product services. +* Device and Connection Information: We collect information about the devices you use to access our services, such as your computer, phone, tablet, or other devices. This type of device information also includes your connection type and settings when installing, accessing, updating, or using our services. We also collect information about your operating system, browser type, URL of referring/exit pages, IP address, device identifiers, and crash data through your device. +* Geolocation Data: Based on your device settings, we may collect geolocation data when you access our website and use our products. For example, we may use your IP address to infer your approximate location. +* Cookies and Other Tracking Technologies: We and our third-party partners, such as our advertising and analytics partners, use various common technologies to provide functionality and identify you across different services and devices. Such technologies typically include tracking pixels, JavaScript, and various "local storage data" technologies, such as cookies and local storage. Such data may include text, personal information (such as your IP address), and information about how you use our services, depending on the technologies we use. For the purposes of this policy, we collectively refer to the cookies and other technologies identified here as "Cookies." Most web browsers have a feature to block cookies. 
You can also choose to clear all cookies stored on your computer. + +#### Information we receive from other sources + +We receive information about you from other service users, our partners, and third-party service providers, social media platforms, and public databases. We may combine this information with the information we collect through other means. This helps us update and improve our records, identify new customers, create more personalized advertising, and recommend services that you may be interested in. When asked to provide personal information, you may refuse. However, if you choose not to provide the information required for certain products, these products or some of their features may not be available or may not function properly. + +We are not responsible for the data policies and procedures or content of any third party. We recommend that you review the privacy policies of each website you visit. + +### How we use information we collect? + +We collect and process personal information about you as necessary to provide the Products you use, operate our Sites and business, meet our contractual and legal obligations, protect the security of our systems and our customers, or fulfil other legitimate interests as described in this Privacy Policy and in our notices to you. + +For example, we may use any of the categories of personal information we describe above to: + +* Operate, maintain and improve our internal operations, systems, Sites, and Products. +* Understand you and your preferences to enhance your experience and enjoyment using our Sites and Products, to provide recommendations, to solicit feedback, and to better market and advertise to you. +* Monitor and analyze user interactions with our Sites and Products to identify trends, usage, and activity patterns. +* Respond to your comments and questions and provide technical support or customer service. +* Provide and deliver the Products you request. 
+* Comply with applicable laws, rules, or regulations and cooperate and defend legal claims and audits. +* Communicate with you about promotions, upcoming events, and other news about products and services offered by Dify.AI and our partners. +* Plan and host corporate events. +* Protect the Site and Products, and investigate and deter against fraudulent, unauthorized, or illegal activity. + +We may also use such information in any other way we may describe when you provide the information or for any other purpose with your consent. + +### How we share information we collect? + +We may share your personal information with your consent. We may also share any category of personal information described above: + +* Sharing with our business partners and other third-party service providers. We share information with third parties who help us operate, provide, improve, integrate, customize, support, and market our services. For example, to provide services to you, we may share information with third-party service partners who provide consulting support. We work with third-party service providers who provide website and application development, hosting, maintenance, backup, storage, virtual infrastructure, payment processing, analytics, and other services to us. Such services may require the service provider to access or use information about you. If a service provider needs to access information about you to act on our behalf in performing services, they will do so under our close instruction and adopt appropriate security and confidentiality procedures to protect your information. +* Sharing with potential buyers and advisors. 
If there is a company sale, merger, reorganization, dissolution, similar event, or measures taken in anticipation of such an event (such as due diligence in a transaction), your personal information may (in accordance with applicable law) be shared with our advisors and any potential buyer's advisors and be transferred to the new owner of the business. +* Sharing information to maintain compliance with laws and regulations. We may share information as required by law or subpoena, or if we reasonably believe that such action is necessary to comply with applicable laws or the reasonable requests of law enforcement, enforce our terms of service, or protect the security or integrity of our website and products, or to exercise or protect the rights, property, or personal safety of our customers, users, or others. + +### How we store and secure information we collect? + +Storage and Processing. The information collected through our website and our products may be stored and processed in any country/region where LangGenius or its affiliated companies or service providers maintain facilities, including your region, the United States, Australia, Canada, China, and the European Economic Area (including the United Kingdom). Our choice of processing location is to ensure efficient operations, improve performance, and create redundancy to protect data in the event of disruptions or other issues. We take measures to ensure that the data we collect in accordance with this Privacy Policy is processed in compliance with this Privacy Policy and applicable laws, regardless of where the data is located. + +International Data Transfers. When we transfer personal information from the European Economic Area (including the UK) and Switzerland to the United States or other countries/regions where the European Commission has not determined their laws provide adequate data protection, we use legal mechanisms designed to help ensure your rights and protections, including contracts. 
Specifically, our website servers are located in the United States, and our affiliates, partners, third parties, and service providers operate in the United States, European Economic Area, and China. This means that when we collect your personal information, we may process it in any of these countries. However, we have taken appropriate safeguards to require that your personal information is protected in accordance with this privacy policy. The main safeguard relied upon by LangGenius is the Standard Contractual Clauses for Data Protection approved by the European Commission. For more information about these mechanisms, please contact us using the detailed contact information provided in the "How to Contact Us" section below. + +Keeping your information safe. LangGenius cares about the security of your information and takes reasonable and appropriate technical and organizational measures designed to prevent loss, misuse, and unauthorized access, disclosure, alteration, and destruction of personal information. However, no security system is impenetrable, and we cannot guarantee the security of our systems or your information. + +Lawful basis for processing personal information (EEA only). LangGenius is the data controller of your information. + +This section below is specifically for you if you are located in the European Economic Area (EEA), United Kingdom or Switzerland. + +Our legal basis for collecting and using the personal information above will depend on the personal information concerned and the specific context in which we collect it. However, we will normally collect personal information only where we have your consent to do so, where we need the personal information to perform a contract with you, or where the processing is in our legitimate interests and not overridden by your data protection interests or fundamental rights and freedoms. In some cases, we may also have a legal obligation to collect personal information from you. 
+ +If we ask you to provide personal information to comply with a legal requirement or to perform a contract with you, we will indicate this at the relevant time and advise you whether the provision of your personal information is mandatory or not (as well as the possible consequences, if any, if you do not provide your personal information). Similarly, if we collect and use your personal information in reliance on our legitimate interests (or those of a third party), we will indicate to you at the relevant time what those legitimate interests are. + +If you have questions about the legal basis for processing or want to find out more, please contact us using the details at the end of this Privacy Policy. + +Retention. We retain personal information for as long as necessary for the purposes for which the personal information is processed and for longer periods as necessary for us to comply with applicable laws. For example, we retain your account information for as long as your account is active or as needed to provide you with Products you have requested or authorized, including maintaining and improving the performance of the Products and protecting system security. We also retain personal data as needed to maintain appropriate business and financial records, protect our legal interests, resolve disputes, or comply with legal or regulatory requirements. Thereafter, we will either delete or anonymize it or, if this is not possible (for example, because your personal information has been stored in backup archives), then we will store your personal information using appropriate security measures and take appropriate steps designed to isolate it from any further processing until deletion is possible. + +### How to access and control your information? + +You have certain rights regarding your personal information, subject to the applicable laws. 
These include the following rights to: + +* Access your personal information: You have the right to ask us to confirm whether we are processing your personal information, and, where that is the case, access to the personal information and receive information on how your data is processed as well as ask us to provide a copy of your personal information. +* Rectify your personal information: You have the right to have any incorrect, incomplete or inaccurate data we hold about you corrected. +* Erase your personal information: You have the right to ask us to delete your personal information when, for example, the data we hold on you is no longer needed or when your data has been processed unlawfully. +* Object to processing: You have the right to object to the processing of your personal information and request us to cease processing of it if, for example, this data is being processed for the purpose of direct marketing or where we are relying on a legitimate interest (or those of a third party). Under certain circumstances, we may demonstrate that we have compelling legitimate grounds to process your information which override your rights and freedoms. +* Restrict the processing: You have the right to ask us to suspend the processing of your personal information in the following scenarios: (a) if you want us to establish the accuracy of the personal information; (b) if our use of the data is illegal but you do not want it erased; (c) if you require us to hold the data even if we no longer need it as you require it to establish, exercise or defend legal claims; or (d) if you have objected to our use of your data but we need to verify whether we have overriding legitimate grounds to use it. 
+* Receive your personal information in a usable electronic format and transmit it to a third party (right to data portability): If we are processing your personal information based on your consent or a contract, you can ask to receive your personal information in a structured, commonly used and machine-readable format. Without any obstacle from us, you can also ask us to transmit those data to another controller. +* Withdraw consent: Where we are relying on your consent to process your personal information, you have the right to withdraw your consent at any time. However, this will not affect the lawfulness of any processing carried out before you withdraw your consent. If you withdraw your consent, we will no longer process that personal information, but we may be unable to continue providing certain products or services to you for which the personal information was sought. At the time you withdraw your consent, we will advise you if this is the case. +* Opt-out of communications: By using the unsubscribe link within each email, updating your email preferences within your Service account settings menu, or by contacting us as provided below to have your contact information removed from our promotional email list or registration database, you may opt-out of receiving promotional communications from us. You will continue to receive transactional messages from us regarding our Services even after you opt-out from receiving promotional messages from us. You can opt-out of some notification messages in your account settings. Please note, you will continue to receive generic ads. +* Send "Do Not Track" Signals: Some browsers have incorporated "Do Not Track" (DNT) features that can send a signal to the websites you visit indicating you do not wish to be tracked. Our Services do not currently respond to browser DNT signals since there is not yet a common understanding of how to interpret the DNT signal. 
You can use the range of other tools we provide to control data collection and use, including the ability to opt-out of receiving marketing from us as described above. + +These rights may be limited in some situations – for example, where we can demonstrate that we have a legal requirement to process your data (such as where tax authorities require us to retain it) or where it is needed for the proper performance of a contract. Under certain circumstances, this may mean that we are able to retain data even if you withdraw your consent. + +If an administrator manages the service for you (see "Notice to End Users" below), you may need to contact your administrator first to assist with your requests. For all other requests, please contact us through the details in the "How to Contact Us" section below. If you have unresolved concerns, you may have the right to complain to a data protection authority in the country where you live, where you work or where you feel your rights were infringed. + +If you are a California resident, please kindly refer to the "California Requirements" below for your rights and other important information. + +### Other important privacy information + +#### California Requirements + +There are some additional rights that may be available to you under the California Consumer Privacy Act ("CCPA") if you are a California resident. In addition to those listed under "How to access and control your information" above, we will explain to you in the following paragraphs how you may exercise your rights under the CCPA: + +* Right against discrimination: You have the right not to be discriminated against for exercising any of the rights described in this section. We will not discriminate against you for exercising your right to know, delete or opt-out of sales. +* Right to opt-out of selling: You have the right to opt-out of having your personal information sold. We do not sell your personal information. 
The terms of "personal information" and "selling" are broadly defined under the CCPA, such that sharing identifiers linked to you for a benefit may be considered a sale. You have the right to understand the types of personal information that we sold about you and the types of third parties with whom we shared such information. +* Processing your information: This Policy outlines the types of personal information we may collect, the sources of that information, as well as our rules on deletion and retention. We’ve also included information about how we may process your information, which includes for "business purposes" as defined by the CCPA - such as to protect against illegal activities, and for the development of new products, features, and technologies. If you have questions regarding the categories of information we may collect about you, please refer to the section of this Policy called "What information we collect about you". You may also refer to the section called "How we use information we collect" for more details about our processing activities. + +#### Our policy toward children + +Our Services are NOT directed to children under the age of 18 and we do not knowingly collect personal information from children under 18. If we become aware that a child under 18 has provided us with personal information, we will promptly delete such personal data from our systems. If you become aware or have reason to believe that a child has provided us with personal information through our Services, please contact us at the details in Section "How to Contact Us" below and we will delete that information from our databases. + +#### Changes to our policy + +We may modify this Policy at any time, without prior notice, and changes may apply to any personal information we already hold about you, as well as any new personal information collected after the Policy is modified. If we make changes, we will notify you by revising the date at the top of this Policy. 
We will provide you with advance and more prominent notice if we make any material changes to how we collect, use or disclose your personal information that impacts your rights under this Policy. Unless otherwise required by laws in your residence, your continued access or use of our Services after receiving the notice of changes, constitutes your acknowledgement that you accept the updated Policy. + +In addition, we may provide you with real-time disclosures or additional information about the personal information handling practices of specific parts of our Services. Such notices may supplement this Policy or provide you with additional choices about how we process your personal information. If you disagree with any changes to this Policy, you will need to stop using the Services and deactivate your account(s), as outlined above. + +### How to contact us? + +Your information is controlled by LangGenius. If you have questions or concerns about how your information is handled, please direct your inquiry to LangGenius, which is responsible for facilitating such inquiries. + +LangGenius, Inc., a Delaware registered company (File No. 7358523), USA. + +Email: Hello@dify.ai. diff --git a/en/user-agreement/terms-of-service.md b/en/user-agreement/terms-of-service.md new file mode 100644 index 0000000..50e38c9 --- /dev/null +++ b/en/user-agreement/terms-of-service.md @@ -0,0 +1,26 @@ +# Terms of Service + +The following document describes the conditions of use of our Websites and Services. + +LangGenius, Inc. ("LangGenius") is a US company whose registered office is located at 651 N BROAD ST SUITE 201, Delaware, USA, under the file number 7358523 and represented by Mr. Luyu Zhang, its CEO. + +LangGenius is pleased to give you access to LangGenius's website ([dify.ai](https://dify.ai/)) ("Website", "Dify.AI" or "Dify"), and related applications and resources (collectively, the "Services"). 
Your use of the Services is subject to the binding legal agreement set forth below ("Terms"). + +### Beta Software + +We may provide you with beta and experimental products, features, and documentation ("Beta Software") on an early access basis. Beta Software is not generally available and may contain errors, defects, and inaccuracies. We provide Beta Software "as is" without any warranties and may terminate Beta Software at any time without ensuring the preservation of Beta Software data. Our service level agreements do not apply to Beta Software. If Beta Software becomes generally available, you may choose to pay for the software or stop using it. We may use your feedback on Beta Software. + +### Usage of LangGenius Services + +You may use the Dify.AI development platform to create software applications for your target user group to achieve your commercial objectives. You understand and acknowledge that the language and information generated by software applications created based on Dify.AI are derived from user-written prompts and their Large Language Model (LLM) providers (such as OpenAI). LLM is an immature technology, and any negative consequences arising from the language and information generated are not related to Dify.AI , especially if you commercialize the application but fail to meet your operational expectations (including but not limited to not meeting your advertising description expectations, target effect description expectations, etc.) or experience service interruptions. + +The development of prompts for LLM models is the core product capability and service provided by Dify. During your use of Dify for prompt engineering, embedding, fine-tuning, and testing of LLM models, additional resource consumption may occur. By registering or starting to use Dify, you agree to the potential consumption of resources and costs associated with the LLM algorithms of this product. You should closely monitor billing changes from third-party LLM providers. 
You shall bear all consequences of any abnormal consumption and billing issues that prevent you from using Dify services normally. + +### Intellectual Property + +The Services, logo, interfaces, photos, Websites are owned by LangGenius, Inc. or its licensees and protected by applicable law, with all rights reserved. LangGenius takes the protection of intellectual property rights, including copyrights, very seriously. LangGenius will terminate your access to, or use of, all or any part of the Services, without notice to you, if you infringe or misappropriate the intellectual property rights, including copyrights, of others. + +### Applicable Law + +The Terms are governed by the laws of the State of California, United States. + diff --git a/en/web-application/conversation-application.md b/en/web-application/conversation-application.md new file mode 100644 index 0000000..4f41bff --- /dev/null +++ b/en/web-application/conversation-application.md @@ -0,0 +1,48 @@ +# Conversation Application + +Conversational applications use a question-and-answer model to maintain a dialogue with the user. Conversational applications support the following capabilities (please confirm that the following functions are enabled when the application is programmed): + +* Variables to fill in before the dialog. Create, pin, and delete conversations. +* Conversation remarks. +* Follow-up. +* Speech to text. + +### Variables filled in before the dialog + +If you have the requirement to fill in variables when you apply the layout, you need to fill in the information according to the prompts before entering the dialog window: + +
+ +Fill in the necessary content and click the "Start Chat" button to start chatting. + +
+ +Move to the AI's answer, you can copy the content of the conversation, and give the answer "like" and "dislike". + +
+ +### Conversation creation, pinning and deletion + +Click the "New Conversation" button to start a new conversation. Move to a session, and the session can be "pinned" and "deleted". + +
+ +### Conversation remarks + +If the "Conversation remarks" function is enabled when the application is programmed, the AI application will automatically initiate the first sentence of the dialogue when creating a new dialogue: + +
+ +### Follow-up + +If the "Follow-up" function is enabled during the application arrangement, the system will automatically generate 3 related question suggestions after the dialogue: + +
+ +### Speech to text + +If the "Speech to Text" function is enabled during application programming, you will see the voice input icon in the input box on the web application side, click the icon to convert the voice input into text: + +_Please make sure that the device environment you are using is authorized to use the microphone._ + +
diff --git a/en/web-application/overview.md b/en/web-application/overview.md new file mode 100644 index 0000000..75698f4 --- /dev/null +++ b/en/web-application/overview.md @@ -0,0 +1,28 @@ +# Overview + +Web applications are for application consumers. When an application developer creates an application in Dify, he will get a corresponding web application. Users of the web application can use it without logging in. The web application is adapted to different sizes of devices: PC, tablet and mobile. + +The content of the web application is consistent with the configuration published by the application. When the configuration of the application is modified and the "Publish" button is clicked on the prompt word layout page of the application to publish, the content of the web application will also be updated according to the configuration of the current application. + +We can enable and disable access to the web application on the application overview page, and modify the site information of the web application: + +* Icon +* Name +* Application Description +* Interface Language +* Copyright Information +* Privacy Policy Link + +The functional performance of the web application depends on whether the developer enables this function when compiling the application, for example: + +* Conversation remarks +* Variables filled in before the conversation +* Follow-up +* Speech to text +* More answers like this (Text Generation apps) +* ... + +In the following chapters, we will introduce the two types of web applications separately: + +* Text Generator +* Conversational diff --git a/en/web-application/text-generator.md b/en/web-application/text-generator.md new file mode 100644 index 0000000..27baeb0 --- /dev/null +++ b/en/web-application/text-generator.md @@ -0,0 +1,60 @@ +# Text Generator + +The text generation application is an application that automatically generates high-quality text according to the prompts provided by the user. 
It can generate various types of text, such as article summaries, translations, etc. + + + +Text generation applications support the following features: + +1. Run it once. +2. Run in batches. +3. Save the run results. +4. Generate more similar results. + +Let's introduce them separately. + + + +### Run it once + +Enter the query content, click the run button, and the result will be generated on the right, as shown in the following figure: + +
+ +In the generated results section, click the "Copy" button to copy the content to the clipboard. Click the "Save" button to save the content. You can see the saved content in the "Saved" tab. You can also "like" and "dislike" the generated content. + +### Run in batches + +Sometimes, we need to run an application many times. For example: There is a web application that can generate articles based on topics. Now we want to generate 100 articles on different topics. Then this task has to be done 100 times, which is very troublesome. Also, you have to wait for one task to complete before starting the next one. + +In the above scenario, the batch operation function is used, which is convenient to operate (enter the theme into a `csv` file, only need to be executed once), and also saves the generation time (multiple tasks run at the same time). The usage is as follows: + +#### Step 1 Enter the batch run page + +Click the "Run Batch" tab to enter the batch run page. + +
+ +#### Step 2 Download the template and fill in the content + +Click the Download Template button to download the template. Edit the template, fill in the content, and save as a `.csv` file. + +
+ +#### Step 3 Upload the file and run + +
+ +If you need to export the generated content, you can click the "Download" button in the upper right corner to export as a `csv` file. + +### Save run results + +Click the "Save" button below the generated results to save the running results. In the "Saved" tab, you can see all saved content. + +
+ +### Generate more similar results + +If the "more similar" function is turned on when arranging the application, clicking the "more similar" button in the web application generates content similar to the current result. As shown below: + +
diff --git a/zh_CN/.gitbook/assets/640-1.jpg b/zh_CN/.gitbook/assets/640-1.jpg new file mode 100644 index 0000000..1042d3c Binary files /dev/null and b/zh_CN/.gitbook/assets/640-1.jpg differ diff --git a/zh_CN/.gitbook/assets/640-10.jpg b/zh_CN/.gitbook/assets/640-10.jpg new file mode 100644 index 0000000..befb777 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-10.jpg differ diff --git a/zh_CN/.gitbook/assets/640-10.png b/zh_CN/.gitbook/assets/640-10.png new file mode 100644 index 0000000..e490cda Binary files /dev/null and b/zh_CN/.gitbook/assets/640-10.png differ diff --git a/zh_CN/.gitbook/assets/640-11.jpg b/zh_CN/.gitbook/assets/640-11.jpg new file mode 100644 index 0000000..e350543 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-11.jpg differ diff --git a/zh_CN/.gitbook/assets/640-11.png b/zh_CN/.gitbook/assets/640-11.png new file mode 100644 index 0000000..76b380d Binary files /dev/null and b/zh_CN/.gitbook/assets/640-11.png differ diff --git a/zh_CN/.gitbook/assets/640-12.jpg b/zh_CN/.gitbook/assets/640-12.jpg new file mode 100644 index 0000000..0f37af7 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-12.jpg differ diff --git a/zh_CN/.gitbook/assets/640-12.png b/zh_CN/.gitbook/assets/640-12.png new file mode 100644 index 0000000..ca14ace Binary files /dev/null and b/zh_CN/.gitbook/assets/640-12.png differ diff --git a/zh_CN/.gitbook/assets/640-13.png b/zh_CN/.gitbook/assets/640-13.png new file mode 100644 index 0000000..a6f074c Binary files /dev/null and b/zh_CN/.gitbook/assets/640-13.png differ diff --git a/zh_CN/.gitbook/assets/640-14.png b/zh_CN/.gitbook/assets/640-14.png new file mode 100644 index 0000000..9d519ff Binary files /dev/null and b/zh_CN/.gitbook/assets/640-14.png differ diff --git a/zh_CN/.gitbook/assets/640-2.jpg b/zh_CN/.gitbook/assets/640-2.jpg new file mode 100644 index 0000000..a131126 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-2.jpg differ diff --git a/zh_CN/.gitbook/assets/640-2.png 
b/zh_CN/.gitbook/assets/640-2.png new file mode 100644 index 0000000..712c2da Binary files /dev/null and b/zh_CN/.gitbook/assets/640-2.png differ diff --git a/zh_CN/.gitbook/assets/640-3.jpg b/zh_CN/.gitbook/assets/640-3.jpg new file mode 100644 index 0000000..741a60c Binary files /dev/null and b/zh_CN/.gitbook/assets/640-3.jpg differ diff --git a/zh_CN/.gitbook/assets/640-3.png b/zh_CN/.gitbook/assets/640-3.png new file mode 100644 index 0000000..00e0295 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-3.png differ diff --git a/zh_CN/.gitbook/assets/640-4.jpg b/zh_CN/.gitbook/assets/640-4.jpg new file mode 100644 index 0000000..f186ae4 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-4.jpg differ diff --git a/zh_CN/.gitbook/assets/640-4.png b/zh_CN/.gitbook/assets/640-4.png new file mode 100644 index 0000000..5be378b Binary files /dev/null and b/zh_CN/.gitbook/assets/640-4.png differ diff --git a/zh_CN/.gitbook/assets/640-5.jpg b/zh_CN/.gitbook/assets/640-5.jpg new file mode 100644 index 0000000..f349a08 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-5.jpg differ diff --git a/zh_CN/.gitbook/assets/640-5.png b/zh_CN/.gitbook/assets/640-5.png new file mode 100644 index 0000000..0853125 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-5.png differ diff --git a/zh_CN/.gitbook/assets/640-6.jpg b/zh_CN/.gitbook/assets/640-6.jpg new file mode 100644 index 0000000..afb849a Binary files /dev/null and b/zh_CN/.gitbook/assets/640-6.jpg differ diff --git a/zh_CN/.gitbook/assets/640-6.png b/zh_CN/.gitbook/assets/640-6.png new file mode 100644 index 0000000..2dbb7d6 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-6.png differ diff --git a/zh_CN/.gitbook/assets/640-7.jpg b/zh_CN/.gitbook/assets/640-7.jpg new file mode 100644 index 0000000..9074985 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-7.jpg differ diff --git a/zh_CN/.gitbook/assets/640-7.png b/zh_CN/.gitbook/assets/640-7.png new file mode 100644 index 0000000..0459222 Binary 
files /dev/null and b/zh_CN/.gitbook/assets/640-7.png differ diff --git a/zh_CN/.gitbook/assets/640-8.jpg b/zh_CN/.gitbook/assets/640-8.jpg new file mode 100644 index 0000000..dcf1f93 Binary files /dev/null and b/zh_CN/.gitbook/assets/640-8.jpg differ diff --git a/zh_CN/.gitbook/assets/640-8.png b/zh_CN/.gitbook/assets/640-8.png new file mode 100644 index 0000000..4ab0a5b Binary files /dev/null and b/zh_CN/.gitbook/assets/640-8.png differ diff --git a/zh_CN/.gitbook/assets/640-9.jpg b/zh_CN/.gitbook/assets/640-9.jpg new file mode 100644 index 0000000..911923b Binary files /dev/null and b/zh_CN/.gitbook/assets/640-9.jpg differ diff --git a/zh_CN/.gitbook/assets/640-9.png b/zh_CN/.gitbook/assets/640-9.png new file mode 100644 index 0000000..304a64f Binary files /dev/null and b/zh_CN/.gitbook/assets/640-9.png differ diff --git a/zh_CN/.gitbook/assets/640.jpg b/zh_CN/.gitbook/assets/640.jpg new file mode 100644 index 0000000..010f651 Binary files /dev/null and b/zh_CN/.gitbook/assets/640.jpg differ diff --git a/zh_CN/.gitbook/assets/640.png b/zh_CN/.gitbook/assets/640.png new file mode 100644 index 0000000..50c35db Binary files /dev/null and b/zh_CN/.gitbook/assets/640.png differ diff --git a/zh_CN/.gitbook/assets/add-new-segment.png b/zh_CN/.gitbook/assets/add-new-segment.png new file mode 100644 index 0000000..5bf6e00 Binary files /dev/null and b/zh_CN/.gitbook/assets/add-new-segment.png differ diff --git a/zh_CN/.gitbook/assets/app-log.png b/zh_CN/.gitbook/assets/app-log.png new file mode 100644 index 0000000..c066d21 Binary files /dev/null and b/zh_CN/.gitbook/assets/app-log.png differ diff --git a/zh_CN/.gitbook/assets/app-share.png b/zh_CN/.gitbook/assets/app-share.png new file mode 100644 index 0000000..95bfb8c Binary files /dev/null and b/zh_CN/.gitbook/assets/app-share.png differ diff --git a/zh_CN/.gitbook/assets/create-app.png b/zh_CN/.gitbook/assets/create-app.png new file mode 100644 index 0000000..8f5c28c Binary files /dev/null and 
b/zh_CN/.gitbook/assets/create-app.png differ diff --git a/zh_CN/.gitbook/assets/how-to-configure-azure-openai.png b/zh_CN/.gitbook/assets/how-to-configure-azure-openai.png new file mode 100644 index 0000000..a0d3dd1 Binary files /dev/null and b/zh_CN/.gitbook/assets/how-to-configure-azure-openai.png differ diff --git a/zh_CN/.gitbook/assets/image (1).png b/zh_CN/.gitbook/assets/image (1).png new file mode 100644 index 0000000..c1d4744 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (1).png differ diff --git a/zh_CN/.gitbook/assets/image (10).png b/zh_CN/.gitbook/assets/image (10).png new file mode 100644 index 0000000..d30d2ab Binary files /dev/null and b/zh_CN/.gitbook/assets/image (10).png differ diff --git a/zh_CN/.gitbook/assets/image (100).png b/zh_CN/.gitbook/assets/image (100).png new file mode 100644 index 0000000..15b5f3c Binary files /dev/null and b/zh_CN/.gitbook/assets/image (100).png differ diff --git a/zh_CN/.gitbook/assets/image (101).png b/zh_CN/.gitbook/assets/image (101).png new file mode 100644 index 0000000..dcd40ac Binary files /dev/null and b/zh_CN/.gitbook/assets/image (101).png differ diff --git a/zh_CN/.gitbook/assets/image (102).png b/zh_CN/.gitbook/assets/image (102).png new file mode 100644 index 0000000..d5b1b11 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (102).png differ diff --git a/zh_CN/.gitbook/assets/image (103).png b/zh_CN/.gitbook/assets/image (103).png new file mode 100644 index 0000000..54b9924 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (103).png differ diff --git a/zh_CN/.gitbook/assets/image (104).png b/zh_CN/.gitbook/assets/image (104).png new file mode 100644 index 0000000..49fd5c1 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (104).png differ diff --git a/zh_CN/.gitbook/assets/image (105).png b/zh_CN/.gitbook/assets/image (105).png new file mode 100644 index 0000000..6a5bdf4 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (105).png differ diff --git 
a/zh_CN/.gitbook/assets/image (106).png b/zh_CN/.gitbook/assets/image (106).png new file mode 100644 index 0000000..d451097 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (106).png differ diff --git a/zh_CN/.gitbook/assets/image (107).png b/zh_CN/.gitbook/assets/image (107).png new file mode 100644 index 0000000..1d045ec Binary files /dev/null and b/zh_CN/.gitbook/assets/image (107).png differ diff --git a/zh_CN/.gitbook/assets/image (108).png b/zh_CN/.gitbook/assets/image (108).png new file mode 100644 index 0000000..27452ba Binary files /dev/null and b/zh_CN/.gitbook/assets/image (108).png differ diff --git a/zh_CN/.gitbook/assets/image (109).png b/zh_CN/.gitbook/assets/image (109).png new file mode 100644 index 0000000..c981540 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (109).png differ diff --git a/zh_CN/.gitbook/assets/image (11).png b/zh_CN/.gitbook/assets/image (11).png new file mode 100644 index 0000000..043610d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (11).png differ diff --git a/zh_CN/.gitbook/assets/image (110).png b/zh_CN/.gitbook/assets/image (110).png new file mode 100644 index 0000000..2004039 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (110).png differ diff --git a/zh_CN/.gitbook/assets/image (111).png b/zh_CN/.gitbook/assets/image (111).png new file mode 100644 index 0000000..8621e9f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (111).png differ diff --git a/zh_CN/.gitbook/assets/image (12).png b/zh_CN/.gitbook/assets/image (12).png new file mode 100644 index 0000000..b48dea6 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (12).png differ diff --git a/zh_CN/.gitbook/assets/image (13).png b/zh_CN/.gitbook/assets/image (13).png new file mode 100644 index 0000000..12f51f4 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (13).png differ diff --git a/zh_CN/.gitbook/assets/image (14).png b/zh_CN/.gitbook/assets/image (14).png new file mode 100644 index 0000000..ca4cc0b 
Binary files /dev/null and b/zh_CN/.gitbook/assets/image (14).png differ diff --git a/zh_CN/.gitbook/assets/image (15).png b/zh_CN/.gitbook/assets/image (15).png new file mode 100644 index 0000000..db1690f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (15).png differ diff --git a/zh_CN/.gitbook/assets/image (16).png b/zh_CN/.gitbook/assets/image (16).png new file mode 100644 index 0000000..97bdd29 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (16).png differ diff --git a/zh_CN/.gitbook/assets/image (17).png b/zh_CN/.gitbook/assets/image (17).png new file mode 100644 index 0000000..4508c5a Binary files /dev/null and b/zh_CN/.gitbook/assets/image (17).png differ diff --git a/zh_CN/.gitbook/assets/image (18).png b/zh_CN/.gitbook/assets/image (18).png new file mode 100644 index 0000000..0fba94f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (18).png differ diff --git a/zh_CN/.gitbook/assets/image (19).png b/zh_CN/.gitbook/assets/image (19).png new file mode 100644 index 0000000..2ea7d6f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (19).png differ diff --git a/zh_CN/.gitbook/assets/image (2).png b/zh_CN/.gitbook/assets/image (2).png new file mode 100644 index 0000000..db1690f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (2).png differ diff --git a/zh_CN/.gitbook/assets/image (20).png b/zh_CN/.gitbook/assets/image (20).png new file mode 100644 index 0000000..0686b1f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (20).png differ diff --git a/zh_CN/.gitbook/assets/image (21).png b/zh_CN/.gitbook/assets/image (21).png new file mode 100644 index 0000000..db58b18 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (21).png differ diff --git a/zh_CN/.gitbook/assets/image (22).png b/zh_CN/.gitbook/assets/image (22).png new file mode 100644 index 0000000..30795cc Binary files /dev/null and b/zh_CN/.gitbook/assets/image (22).png differ diff --git a/zh_CN/.gitbook/assets/image (23).png 
b/zh_CN/.gitbook/assets/image (23).png new file mode 100644 index 0000000..a2a9fe4 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (23).png differ diff --git a/zh_CN/.gitbook/assets/image (24).png b/zh_CN/.gitbook/assets/image (24).png new file mode 100644 index 0000000..c24e33d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (24).png differ diff --git a/zh_CN/.gitbook/assets/image (25).png b/zh_CN/.gitbook/assets/image (25).png new file mode 100644 index 0000000..a8c856e Binary files /dev/null and b/zh_CN/.gitbook/assets/image (25).png differ diff --git a/zh_CN/.gitbook/assets/image (26).png b/zh_CN/.gitbook/assets/image (26).png new file mode 100644 index 0000000..b06128f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (26).png differ diff --git a/zh_CN/.gitbook/assets/image (27).png b/zh_CN/.gitbook/assets/image (27).png new file mode 100644 index 0000000..bfde71f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (27).png differ diff --git a/zh_CN/.gitbook/assets/image (28).png b/zh_CN/.gitbook/assets/image (28).png new file mode 100644 index 0000000..165d029 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (28).png differ diff --git a/zh_CN/.gitbook/assets/image (29).png b/zh_CN/.gitbook/assets/image (29).png new file mode 100644 index 0000000..cd6f027 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (29).png differ diff --git a/zh_CN/.gitbook/assets/image (3).png b/zh_CN/.gitbook/assets/image (3).png new file mode 100644 index 0000000..1e24423 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (3).png differ diff --git a/zh_CN/.gitbook/assets/image (30).png b/zh_CN/.gitbook/assets/image (30).png new file mode 100644 index 0000000..13fa877 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (30).png differ diff --git a/zh_CN/.gitbook/assets/image (31).png b/zh_CN/.gitbook/assets/image (31).png new file mode 100644 index 0000000..f85301d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (31).png 
differ diff --git a/zh_CN/.gitbook/assets/image (32).png b/zh_CN/.gitbook/assets/image (32).png new file mode 100644 index 0000000..edb1468 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (32).png differ diff --git a/zh_CN/.gitbook/assets/image (33).png b/zh_CN/.gitbook/assets/image (33).png new file mode 100644 index 0000000..1bf35a9 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (33).png differ diff --git a/zh_CN/.gitbook/assets/image (34).png b/zh_CN/.gitbook/assets/image (34).png new file mode 100644 index 0000000..63dc5b2 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (34).png differ diff --git a/zh_CN/.gitbook/assets/image (35).png b/zh_CN/.gitbook/assets/image (35).png new file mode 100644 index 0000000..d23c1e3 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (35).png differ diff --git a/zh_CN/.gitbook/assets/image (36).png b/zh_CN/.gitbook/assets/image (36).png new file mode 100644 index 0000000..c24e33d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (36).png differ diff --git a/zh_CN/.gitbook/assets/image (37).png b/zh_CN/.gitbook/assets/image (37).png new file mode 100644 index 0000000..3d09805 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (37).png differ diff --git a/zh_CN/.gitbook/assets/image (38).png b/zh_CN/.gitbook/assets/image (38).png new file mode 100644 index 0000000..9883560 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (38).png differ diff --git a/zh_CN/.gitbook/assets/image (39).png b/zh_CN/.gitbook/assets/image (39).png new file mode 100644 index 0000000..b4e0be6 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (39).png differ diff --git a/zh_CN/.gitbook/assets/image (4).png b/zh_CN/.gitbook/assets/image (4).png new file mode 100644 index 0000000..0ef5f75 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (4).png differ diff --git a/zh_CN/.gitbook/assets/image (40).png b/zh_CN/.gitbook/assets/image (40).png new file mode 100644 index 0000000..472d6e8 Binary 
files /dev/null and b/zh_CN/.gitbook/assets/image (40).png differ diff --git a/zh_CN/.gitbook/assets/image (41).png b/zh_CN/.gitbook/assets/image (41).png new file mode 100644 index 0000000..1b285bc Binary files /dev/null and b/zh_CN/.gitbook/assets/image (41).png differ diff --git a/zh_CN/.gitbook/assets/image (42).png b/zh_CN/.gitbook/assets/image (42).png new file mode 100644 index 0000000..46e504d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (42).png differ diff --git a/zh_CN/.gitbook/assets/image (43).png b/zh_CN/.gitbook/assets/image (43).png new file mode 100644 index 0000000..ece9b01 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (43).png differ diff --git a/zh_CN/.gitbook/assets/image (44).png b/zh_CN/.gitbook/assets/image (44).png new file mode 100644 index 0000000..c6c01ed Binary files /dev/null and b/zh_CN/.gitbook/assets/image (44).png differ diff --git a/zh_CN/.gitbook/assets/image (45).png b/zh_CN/.gitbook/assets/image (45).png new file mode 100644 index 0000000..db58b18 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (45).png differ diff --git a/zh_CN/.gitbook/assets/image (46).png b/zh_CN/.gitbook/assets/image (46).png new file mode 100644 index 0000000..63cd1e7 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (46).png differ diff --git a/zh_CN/.gitbook/assets/image (47).png b/zh_CN/.gitbook/assets/image (47).png new file mode 100644 index 0000000..c0c7213 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (47).png differ diff --git a/zh_CN/.gitbook/assets/image (48).png b/zh_CN/.gitbook/assets/image (48).png new file mode 100644 index 0000000..db58b18 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (48).png differ diff --git a/zh_CN/.gitbook/assets/image (49).png b/zh_CN/.gitbook/assets/image (49).png new file mode 100644 index 0000000..eca6a43 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (49).png differ diff --git a/zh_CN/.gitbook/assets/image (5).png b/zh_CN/.gitbook/assets/image 
(5).png new file mode 100644 index 0000000..47df9c3 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (5).png differ diff --git a/zh_CN/.gitbook/assets/image (50).png b/zh_CN/.gitbook/assets/image (50).png new file mode 100644 index 0000000..e46d582 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (50).png differ diff --git a/zh_CN/.gitbook/assets/image (51).png b/zh_CN/.gitbook/assets/image (51).png new file mode 100644 index 0000000..c16201b Binary files /dev/null and b/zh_CN/.gitbook/assets/image (51).png differ diff --git a/zh_CN/.gitbook/assets/image (52).png b/zh_CN/.gitbook/assets/image (52).png new file mode 100644 index 0000000..4d91b1a Binary files /dev/null and b/zh_CN/.gitbook/assets/image (52).png differ diff --git a/zh_CN/.gitbook/assets/image (53).png b/zh_CN/.gitbook/assets/image (53).png new file mode 100644 index 0000000..dbcb27d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (53).png differ diff --git a/zh_CN/.gitbook/assets/image (54).png b/zh_CN/.gitbook/assets/image (54).png new file mode 100644 index 0000000..a945f99 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (54).png differ diff --git a/zh_CN/.gitbook/assets/image (55).png b/zh_CN/.gitbook/assets/image (55).png new file mode 100644 index 0000000..8796ba5 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (55).png differ diff --git a/zh_CN/.gitbook/assets/image (56).png b/zh_CN/.gitbook/assets/image (56).png new file mode 100644 index 0000000..75ef270 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (56).png differ diff --git a/zh_CN/.gitbook/assets/image (57).png b/zh_CN/.gitbook/assets/image (57).png new file mode 100644 index 0000000..43831ce Binary files /dev/null and b/zh_CN/.gitbook/assets/image (57).png differ diff --git a/zh_CN/.gitbook/assets/image (58).png b/zh_CN/.gitbook/assets/image (58).png new file mode 100644 index 0000000..57664b7 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (58).png differ diff --git 
a/zh_CN/.gitbook/assets/image (59).png b/zh_CN/.gitbook/assets/image (59).png new file mode 100644 index 0000000..f3e176f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (59).png differ diff --git a/zh_CN/.gitbook/assets/image (6).png b/zh_CN/.gitbook/assets/image (6).png new file mode 100644 index 0000000..db1690f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (6).png differ diff --git a/zh_CN/.gitbook/assets/image (60).png b/zh_CN/.gitbook/assets/image (60).png new file mode 100644 index 0000000..55087c7 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (60).png differ diff --git a/zh_CN/.gitbook/assets/image (61).png b/zh_CN/.gitbook/assets/image (61).png new file mode 100644 index 0000000..9355e1d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (61).png differ diff --git a/zh_CN/.gitbook/assets/image (62).png b/zh_CN/.gitbook/assets/image (62).png new file mode 100644 index 0000000..cd55d00 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (62).png differ diff --git a/zh_CN/.gitbook/assets/image (63).png b/zh_CN/.gitbook/assets/image (63).png new file mode 100644 index 0000000..640e5aa Binary files /dev/null and b/zh_CN/.gitbook/assets/image (63).png differ diff --git a/zh_CN/.gitbook/assets/image (64).png b/zh_CN/.gitbook/assets/image (64).png new file mode 100644 index 0000000..74f46ef Binary files /dev/null and b/zh_CN/.gitbook/assets/image (64).png differ diff --git a/zh_CN/.gitbook/assets/image (65).png b/zh_CN/.gitbook/assets/image (65).png new file mode 100644 index 0000000..fa3b653 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (65).png differ diff --git a/zh_CN/.gitbook/assets/image (66).png b/zh_CN/.gitbook/assets/image (66).png new file mode 100644 index 0000000..6fa4306 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (66).png differ diff --git a/zh_CN/.gitbook/assets/image (67).png b/zh_CN/.gitbook/assets/image (67).png new file mode 100644 index 0000000..7630887 Binary files /dev/null and 
b/zh_CN/.gitbook/assets/image (67).png differ diff --git a/zh_CN/.gitbook/assets/image (68).png b/zh_CN/.gitbook/assets/image (68).png new file mode 100644 index 0000000..bb00817 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (68).png differ diff --git a/zh_CN/.gitbook/assets/image (69).png b/zh_CN/.gitbook/assets/image (69).png new file mode 100644 index 0000000..bf08a9a Binary files /dev/null and b/zh_CN/.gitbook/assets/image (69).png differ diff --git a/zh_CN/.gitbook/assets/image (7).png b/zh_CN/.gitbook/assets/image (7).png new file mode 100644 index 0000000..2f6c180 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (7).png differ diff --git a/zh_CN/.gitbook/assets/image (70).png b/zh_CN/.gitbook/assets/image (70).png new file mode 100644 index 0000000..0e07879 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (70).png differ diff --git a/zh_CN/.gitbook/assets/image (71).png b/zh_CN/.gitbook/assets/image (71).png new file mode 100644 index 0000000..0ca666d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (71).png differ diff --git a/zh_CN/.gitbook/assets/image (72).png b/zh_CN/.gitbook/assets/image (72).png new file mode 100644 index 0000000..ece9b01 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (72).png differ diff --git a/zh_CN/.gitbook/assets/image (73).png b/zh_CN/.gitbook/assets/image (73).png new file mode 100644 index 0000000..0e46b7b Binary files /dev/null and b/zh_CN/.gitbook/assets/image (73).png differ diff --git a/zh_CN/.gitbook/assets/image (74).png b/zh_CN/.gitbook/assets/image (74).png new file mode 100644 index 0000000..db58b18 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (74).png differ diff --git a/zh_CN/.gitbook/assets/image (75).png b/zh_CN/.gitbook/assets/image (75).png new file mode 100644 index 0000000..357513b Binary files /dev/null and b/zh_CN/.gitbook/assets/image (75).png differ diff --git a/zh_CN/.gitbook/assets/image (76).png b/zh_CN/.gitbook/assets/image (76).png new file mode 
100644 index 0000000..87d1a9f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (76).png differ diff --git a/zh_CN/.gitbook/assets/image (77).png b/zh_CN/.gitbook/assets/image (77).png new file mode 100644 index 0000000..0ca666d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (77).png differ diff --git a/zh_CN/.gitbook/assets/image (78).png b/zh_CN/.gitbook/assets/image (78).png new file mode 100644 index 0000000..95702f3 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (78).png differ diff --git a/zh_CN/.gitbook/assets/image (79).png b/zh_CN/.gitbook/assets/image (79).png new file mode 100644 index 0000000..a06e6bc Binary files /dev/null and b/zh_CN/.gitbook/assets/image (79).png differ diff --git a/zh_CN/.gitbook/assets/image (8).png b/zh_CN/.gitbook/assets/image (8).png new file mode 100644 index 0000000..da33fb3 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (8).png differ diff --git a/zh_CN/.gitbook/assets/image (80).png b/zh_CN/.gitbook/assets/image (80).png new file mode 100644 index 0000000..69f815c Binary files /dev/null and b/zh_CN/.gitbook/assets/image (80).png differ diff --git a/zh_CN/.gitbook/assets/image (81).png b/zh_CN/.gitbook/assets/image (81).png new file mode 100644 index 0000000..e972ddb Binary files /dev/null and b/zh_CN/.gitbook/assets/image (81).png differ diff --git a/zh_CN/.gitbook/assets/image (82).png b/zh_CN/.gitbook/assets/image (82).png new file mode 100644 index 0000000..dd1b133 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (82).png differ diff --git a/zh_CN/.gitbook/assets/image (83).png b/zh_CN/.gitbook/assets/image (83).png new file mode 100644 index 0000000..4bfe23d Binary files /dev/null and b/zh_CN/.gitbook/assets/image (83).png differ diff --git a/zh_CN/.gitbook/assets/image (84).png b/zh_CN/.gitbook/assets/image (84).png new file mode 100644 index 0000000..99d8e0f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (84).png differ diff --git a/zh_CN/.gitbook/assets/image 
(85).png b/zh_CN/.gitbook/assets/image (85).png new file mode 100644 index 0000000..71c5637 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (85).png differ diff --git a/zh_CN/.gitbook/assets/image (86).png b/zh_CN/.gitbook/assets/image (86).png new file mode 100644 index 0000000..52b6ceb Binary files /dev/null and b/zh_CN/.gitbook/assets/image (86).png differ diff --git a/zh_CN/.gitbook/assets/image (87).png b/zh_CN/.gitbook/assets/image (87).png new file mode 100644 index 0000000..f9380eb Binary files /dev/null and b/zh_CN/.gitbook/assets/image (87).png differ diff --git a/zh_CN/.gitbook/assets/image (88).png b/zh_CN/.gitbook/assets/image (88).png new file mode 100644 index 0000000..d9a8400 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (88).png differ diff --git a/zh_CN/.gitbook/assets/image (89).png b/zh_CN/.gitbook/assets/image (89).png new file mode 100644 index 0000000..654a8b6 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (89).png differ diff --git a/zh_CN/.gitbook/assets/image (9).png b/zh_CN/.gitbook/assets/image (9).png new file mode 100644 index 0000000..16115b0 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (9).png differ diff --git a/zh_CN/.gitbook/assets/image (90).png b/zh_CN/.gitbook/assets/image (90).png new file mode 100644 index 0000000..b99288f Binary files /dev/null and b/zh_CN/.gitbook/assets/image (90).png differ diff --git a/zh_CN/.gitbook/assets/image (91).png b/zh_CN/.gitbook/assets/image (91).png new file mode 100644 index 0000000..f9380eb Binary files /dev/null and b/zh_CN/.gitbook/assets/image (91).png differ diff --git a/zh_CN/.gitbook/assets/image (92).png b/zh_CN/.gitbook/assets/image (92).png new file mode 100644 index 0000000..626b4e6 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (92).png differ diff --git a/zh_CN/.gitbook/assets/image (93).png b/zh_CN/.gitbook/assets/image (93).png new file mode 100644 index 0000000..4bcf779 Binary files /dev/null and b/zh_CN/.gitbook/assets/image 
(93).png differ diff --git a/zh_CN/.gitbook/assets/image (94).png b/zh_CN/.gitbook/assets/image (94).png new file mode 100644 index 0000000..c78be69 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (94).png differ diff --git a/zh_CN/.gitbook/assets/image (95).png b/zh_CN/.gitbook/assets/image (95).png new file mode 100644 index 0000000..93526bc Binary files /dev/null and b/zh_CN/.gitbook/assets/image (95).png differ diff --git a/zh_CN/.gitbook/assets/image (96).png b/zh_CN/.gitbook/assets/image (96).png new file mode 100644 index 0000000..d369963 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (96).png differ diff --git a/zh_CN/.gitbook/assets/image (97).png b/zh_CN/.gitbook/assets/image (97).png new file mode 100644 index 0000000..d8bf8a4 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (97).png differ diff --git a/zh_CN/.gitbook/assets/image (98).png b/zh_CN/.gitbook/assets/image (98).png new file mode 100644 index 0000000..21af97c Binary files /dev/null and b/zh_CN/.gitbook/assets/image (98).png differ diff --git a/zh_CN/.gitbook/assets/image (99).png b/zh_CN/.gitbook/assets/image (99).png new file mode 100644 index 0000000..9a25a20 Binary files /dev/null and b/zh_CN/.gitbook/assets/image (99).png differ diff --git a/zh_CN/.gitbook/assets/image-10.png b/zh_CN/.gitbook/assets/image-10.png new file mode 100644 index 0000000..9043d52 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-10.png differ diff --git a/zh_CN/.gitbook/assets/image-11.png b/zh_CN/.gitbook/assets/image-11.png new file mode 100644 index 0000000..ba22b08 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-11.png differ diff --git a/zh_CN/.gitbook/assets/image-13.png b/zh_CN/.gitbook/assets/image-13.png new file mode 100644 index 0000000..90761dd Binary files /dev/null and b/zh_CN/.gitbook/assets/image-13.png differ diff --git a/zh_CN/.gitbook/assets/image-14.png b/zh_CN/.gitbook/assets/image-14.png new file mode 100644 index 0000000..21f2fcf Binary files 
/dev/null and b/zh_CN/.gitbook/assets/image-14.png differ diff --git a/zh_CN/.gitbook/assets/image-15.png b/zh_CN/.gitbook/assets/image-15.png new file mode 100644 index 0000000..74ef518 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-15.png differ diff --git a/zh_CN/.gitbook/assets/image-16.png b/zh_CN/.gitbook/assets/image-16.png new file mode 100644 index 0000000..7c5d7b3 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-16.png differ diff --git a/zh_CN/.gitbook/assets/image-17.png b/zh_CN/.gitbook/assets/image-17.png new file mode 100644 index 0000000..e72af9e Binary files /dev/null and b/zh_CN/.gitbook/assets/image-17.png differ diff --git a/zh_CN/.gitbook/assets/image-18.png b/zh_CN/.gitbook/assets/image-18.png new file mode 100644 index 0000000..8210586 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-18.png differ diff --git a/zh_CN/.gitbook/assets/image-19.png b/zh_CN/.gitbook/assets/image-19.png new file mode 100644 index 0000000..a4af82d Binary files /dev/null and b/zh_CN/.gitbook/assets/image-19.png differ diff --git a/zh_CN/.gitbook/assets/image-2.png b/zh_CN/.gitbook/assets/image-2.png new file mode 100644 index 0000000..824cac9 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-2.png differ diff --git a/zh_CN/.gitbook/assets/image-20.png b/zh_CN/.gitbook/assets/image-20.png new file mode 100644 index 0000000..ad1e0f9 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-20.png differ diff --git a/zh_CN/.gitbook/assets/image-3.png b/zh_CN/.gitbook/assets/image-3.png new file mode 100644 index 0000000..1e57166 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-3.png differ diff --git a/zh_CN/.gitbook/assets/image-4.png b/zh_CN/.gitbook/assets/image-4.png new file mode 100644 index 0000000..ea6193a Binary files /dev/null and b/zh_CN/.gitbook/assets/image-4.png differ diff --git a/zh_CN/.gitbook/assets/image-5.png b/zh_CN/.gitbook/assets/image-5.png new file mode 100644 index 0000000..08850d3 Binary files /dev/null 
and b/zh_CN/.gitbook/assets/image-5.png differ diff --git a/zh_CN/.gitbook/assets/image-6.png b/zh_CN/.gitbook/assets/image-6.png new file mode 100644 index 0000000..2dfdddd Binary files /dev/null and b/zh_CN/.gitbook/assets/image-6.png differ diff --git a/zh_CN/.gitbook/assets/image-7.png b/zh_CN/.gitbook/assets/image-7.png new file mode 100644 index 0000000..11fefe0 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-7.png differ diff --git a/zh_CN/.gitbook/assets/image-8.png b/zh_CN/.gitbook/assets/image-8.png new file mode 100644 index 0000000..86e2f98 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-8.png differ diff --git a/zh_CN/.gitbook/assets/image-9.png b/zh_CN/.gitbook/assets/image-9.png new file mode 100644 index 0000000..9043d52 Binary files /dev/null and b/zh_CN/.gitbook/assets/image-9.png differ diff --git a/zh_CN/.gitbook/assets/image.png b/zh_CN/.gitbook/assets/image.png new file mode 100644 index 0000000..c1d4744 Binary files /dev/null and b/zh_CN/.gitbook/assets/image.png differ diff --git a/zh_CN/.gitbook/assets/notion.png b/zh_CN/.gitbook/assets/notion.png new file mode 100644 index 0000000..8948c67 Binary files /dev/null and b/zh_CN/.gitbook/assets/notion.png differ diff --git a/zh_CN/.gitbook/assets/openaiKey.png b/zh_CN/.gitbook/assets/openaiKey.png new file mode 100644 index 0000000..f0e0605 Binary files /dev/null and b/zh_CN/.gitbook/assets/openaiKey.png differ diff --git a/zh_CN/.gitbook/assets/service-api-page.png b/zh_CN/.gitbook/assets/service-api-page.png new file mode 100644 index 0000000..01d53bd Binary files /dev/null and b/zh_CN/.gitbook/assets/service-api-page.png differ diff --git a/zh_CN/.gitbook/assets/sync-notion.png b/zh_CN/.gitbook/assets/sync-notion.png new file mode 100644 index 0000000..fdb788e Binary files /dev/null and b/zh_CN/.gitbook/assets/sync-notion.png differ diff --git a/zh_CN/README.md b/zh_CN/README.md new file mode 100644 index 0000000..280e22c --- /dev/null +++ b/zh_CN/README.md @@ -0,0 +1,65 @@ 
+--- +description: Dify 一词源自 Define + Modify,意指定义并且持续的改进你的 AI 应用,它是为你而做的(Do it for you)。 +--- + +# 欢迎使用 Dify + +如果你对诸如 GPT-4 之类的 LLM 技术高速发展感到惊奇和兴奋,迫不及待的想用它做点什么有用的东西!可你的头脑里又有一切令人困惑的问题: + +* 我该如何“训练”一个基于我的内容的模型? +* 怎么让 AI 知道 2021 年以后的事情? +* 如何避免让 AI 跟用户胡说八道? +* 微调(Fine-tuning)和嵌入(Embedding)是什么意思? + +那么,Dify 正好能满足你的需要。 + +Dify 的目标是让开发者(甚至非开发者)可以快速基于大型语言模型搭建出有用的东西,并确保它是**可视化、可运营、可改进**的。 + +> 我们塑造了工具,然后工具塑造了我们。——马歇尔·麦克卢汉(Marshall McLuhan) + +你可以使用 Dify 快速搭建一个 Web App,其生成的前端代码可以托管在 Dify 上。如果你想基于这个 Web App 进一步开发,你可以从 GitHub 中得到这些 [Template](advanced/based-on-frontend-templates.md),部署到任何地方(例如 Vercel 或你的服务器)。或者,你也可以基于 WebAPI 开发你自己的 Web 前端、移动 App…总之,为你省下了后端开发的工作。 + +不止于此,Dify 的核心理念是在一个可视化的界面中创建、配置、改进你的应用。基于 LLM 的应用开发有一个持续改进的生命周期,你可能需要基于自己的内容让 AI 给予正确的回答,或是想提升 AI 的准确性和叙述风格,甚至让它去 YouTube 上下载一个字幕作为上下文。 + +这当中将会有些逻辑设计、上下文增强、数据准备等需要花些功夫的事情,如果没有工具你可能会寸步难行…我们称这个过程为 **LLMOps**。 + +### 下一步行动 + +* 看看这些由 Dify 创建的应用 +* 在云端版中快速创建应用 +* 安装 Dify 到你的服务器 + +> 可能只有少数公司有预算来构建和管理像 GPT-3 这样的大型语言模型 (LLM),但是将会有许多价值超过 10 亿美元的“第二层”公司建立起来下一个十年。 +> ——Sam Altman + +正如 LLM 技术的快速发展一样,Dify 还是一个正在不断进步的产品,这份文档的内容和产品可能会有些出入。你可以在 GitHub 或 Discord 上与我们分享想法。 + +### Q\&A + +**Q: 我能用 Dify 做什么?** + +A: Dify 是一个简单且能力丰富的自然语言编程工具。你可以用它搭建商用级应用,个人助理。如果你想自己开发应用,Dify 也能为你省下接入 OpenAI 的后端工作,但使用我们逐步提供高的可视化运营能力,你可以持续的改进和训练你的 GPT 模型。 + +**Q: 如何使用 Dify 训练自己的模型?** + +A: 一个有价值的应用由 Prompt Engineering、上下文增强和 Fine-tune 三个环节组成。我们创造了一种 Prompt 结合编程语言的 Hybrid 编程方式(类似一个模版引擎),你可以轻松的完成长文本嵌入,或抓取用户输入的一个 Youtube 视频的字幕——这些都将作为上下文提交给 LLMs 进行计算。我们十分注重应用的可运营性,你的用户在使用 App 期间产生的数据,可进行分析、标记和持续训练。以上环节如果没有好的工具支持,可能会消耗你大量的时间。 + +**Q: 如果要创建一个自己的应用,我需要准备什么?** + +A: 你选择一个诸如 OpenAI 的模型供应商,我们的云端版内置了 GPT-4 的试用模型,你可以填入自己的 API Key。随后你就可以创建一个应用,基于 Prompt 或自己的上下文。 + +**Q: Dify 搭建的应用能够保持会话吗?** + +A: 可以,如果你创建了对话型应用,它内置了会话保存的能力,在生成的 WebApp 和 API 中都支持。 + +**Q: LLMOps 和 MLOps 有什么区别?** + +A: 过去的 MLOps 是让开发者从零开始训练模型,而 LLMOps 基于诸如 GPT-4 这样的强大模型之上开发 AI 原生应用,你可以查阅这篇[文章](https://blog.dify.ai/unleashing-the-power-of-llm-embeddings-with-datasets-revolutionizing-mlops/)。 + 
+**Q: 提供哪些界面语言?** + +A: 现已支持英文与中文,你可以为我们贡献语言包。 + +**Q: LangGenius 是什么?** + +A: LangGenius 是 Dify 正式发布前的产品名称,我们还在更新所有的文档。Dify 一词源自 **D**efine + Mo**dify**,意指定义并且持续的改进你的 AI 应用,它是为你而做的(Do it for you)。 diff --git a/zh_CN/SUMMARY.md b/zh_CN/SUMMARY.md new file mode 100644 index 0000000..26c046a --- /dev/null +++ b/zh_CN/SUMMARY.md @@ -0,0 +1,66 @@ +# Table of contents + +## 入门 + +* [欢迎使用 Dify](README.md) +* [使用云服务](getting-started/cloud.md) +* [部署社区版](getting-started/install-self-hosted/README.md) + * [Docker Compose 部署](getting-started/install-self-hosted/docker-compose.md) + * [本地源码启动](getting-started/install-self-hosted/local-source-code.md) + * [单独启动前端 Docker 容器](getting-started/install-self-hosted/start-the-frontend-docker-container.md) + * [环境变量说明](getting-started/install-self-hosted/environments.md) +* [什么是 LLMOps?](getting-started/what-is-llmops.md) +* [常见问题](getting-started/faq/README.md) + * [本地部署相关](getting-started/faq/install-faq.md) + * [LLM 配置与使用](getting-started/faq/llms-use-faq.md) + * [API 使用](getting-started/faq/api-use-faq.md) + +## 应用 + +* [创建应用](application/creating-an-application.md) +* [快速发布 AI 站点](application/launch-your-webapp-quickly.md) +* [设计提示词&编排应用](application/prompt-engineering/README.md) + * [文本生成型应用](application/prompt-engineering/text-generation-application.md) + * [对话型应用](application/prompt-engineering/conversation-application.md) +* [基于 APIs 开发](application/developing-with-apis.md) +* [日志与标注](application/logs.md) + +## Web 应用 + +* [概览](web-application/overview.md) +* [文本生成型应用](web-application/text-generator.md) +* [对话型应用](web-application/conversation-application.md) + +## 探索 + +* [智聊](explore/chat.md) + +## 进阶使用 + +* [数据集管理](advanced/datasets/README.md) + * [从 Notion 导入数据](advanced/datasets/sync-from-notion.md) +* [插件](advanced/ai-plugins.md) +* [基于前端模版再开发](advanced/based-on-frontend-templates.md) +* [模型配置](advanced/model-configuration/README.md) + * [接入 Hugging Face 上的开源模型](advanced/model-configuration/hugging-face.md) + * [接入 
Replicate 上的开源模型](advanced/model-configuration/replicate.md) +* [更多集成](advanced/more-integration.md) + +## 使用案例 + +* [如何训练出专属于“你”的问答机器人?](use-cases/train-a-qa-chatbot-that-belongs-to-you.md) +* [教你十几分钟不用代码创建 Midjourney 提示词机器人](use-cases/create-a-midjoureny-prompt-word-robot-with-zero-code.md) +* [零代码,使用 Dify 两分钟接入企业微信](use-cases/integrate-with-wecom-using-dify.md) +* [构建一个 Notion AI 助手](use-cases/build-an-notion-ai-assistant.md) +* [如何在几分钟内创建一个带有业务数据的官网 AI 智能客服](use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md) + +## 社区 + +* [寻求支持](community/support.md) +* [开源协议](community/open-source.md) +* [数据安全](community/data-security.md) + +## 用户协议 + +* [服务协议](user-agreement/terms-of-service.md) +* [隐私协议](user-agreement/privacy-policy.md) diff --git a/zh_CN/advanced/ai-plugins.md b/zh_CN/advanced/ai-plugins.md new file mode 100644 index 0000000..e8e0367 --- /dev/null +++ b/zh_CN/advanced/ai-plugins.md @@ -0,0 +1,5 @@ +# 插件 + +{% hint style="info" %} +插件是 Dify 即将推出的功能,你可以将插件纳入 App 编排之中,通过一个 API 或 WebApp 访问具备插件能力的 AI 应用。Dify 兼容了 ChatGPT Plugins 标准的插件,并提供了一些原生插件。 +{% endhint %} diff --git a/zh_CN/advanced/based-on-frontend-templates.md b/zh_CN/advanced/based-on-frontend-templates.md new file mode 100644 index 0000000..53d556e --- /dev/null +++ b/zh_CN/advanced/based-on-frontend-templates.md @@ -0,0 +1,38 @@ +# 基于前端模版再开发 + +如果开发者是从头开发新产品或者在产品原型设计阶段,你可以使用 Dify 快速发布 AI 站点。与此同时,Dify 希望开发者能够充分自由的创造不同形式的前端应用,为此我们提供了: + +* **SDK**,用于在各种语言中快速接入 Dify API +* **WebApp Template**,每种类型应用的 WebApp 开发脚手架 + +WebApp Template 是基于 MIT 协议开源的,你可以充分自由的修改并部署他们,以实现 Dify 的所有能力。或者作为你实现自己 App 的一份参考代码。 + +你可以在 GitHub 中找到这些 Template: + +* [对话型应用](https://github.com/langgenius/webapp-conversation) +* [文本生成型应用](https://github.com/langgenius/webapp-text-generator) + +使用 WebApp 模版最快的方法就是在 GitHub 中点击「使用这个模版」,它相当于 Fork 了一个新的仓库。随后你需要配置 Dify 的 App ID 和 API Key,类似这样: + +````javascript +export const APP_ID = '' +export const API_KEY = '' +``` + +More config in `config/index.ts`: +```js +export const 
APP_INFO: AppInfo = { + "title": 'Chat APP', + "description": '', + "copyright": '', + "privacy_policy": '', + "default_language": 'zh-Hans' +} + +export const isShowPrompt = true +export const promptTemplate = '' +```` + +每一种 WebApp 模版都提供了 README 文件,内含部署方式的说明。通常,WebApp 模版都包含了一个轻后端服务,这是为了确保开发者的 API KEY 不会直接暴露给用户。 + +这些 WebApp 模版能够帮助你快速搭建起 AI 应用原型,并使用 Dify 的所有能力。如果你基于它们开发了自己的应用或新的模版,欢迎你与我们分享。 diff --git a/zh_CN/advanced/datasets/README.md b/zh_CN/advanced/datasets/README.md new file mode 100644 index 0000000..7798762 --- /dev/null +++ b/zh_CN/advanced/datasets/README.md @@ -0,0 +1,114 @@ +# 数据集管理 + +大多数语言模型采用较为陈旧的训练数据,并且对每次请求的上下文有长度限制。例如 GPT-3.5 是基于 2021 年的语料进行训练的,且有每次约 4K Token 的限制。这意味着开发者如果想让 AI 应用基于最新的、私有的上下文对话,必须使用类似嵌入(Embedding)之类的技术。 + +Dify 的数据集功能可以使开发者(甚至非技术人员)以简单的方式管理数据集,并自动集成至 AI 应用中。你只需准备文本内容,例如: + +* 长文本内容(TXT、Markdown、JSONL 甚至是 PDF 文件) +* 结构化数据(CSV、Excel 等) + +另外,我们正在逐步支持从诸多数据源同步数据至数据集,包括: + +* 网页 +* Notion +* Github +* 数据库 +* …… + +{% hint style="info" %} +**情景**:如果你的公司想基于现有知识库和产品文档建立一个 AI 客服助手,你可以在 Dify 中将文档上传至数据集,并建立一个对话型应用。这在过去可能需要花费你数周的时间,且难以持续维护。 +{% endhint %} + +### 数据集与文档 + +在 Dify 中,**数据集(Datasets)**是一些**文档(Documents)**的集合。一个数据集可以被整体集成至一个应用中作为上下文使用。文档可以由开发者或运营人员上传,或由其它数据源同步(通常对应数据源中的一个文件单位)。 + +**上传文档的步骤:** + +1. 上传你的文件,通常是长文本文件或表格文件。 +2. 分段、清洗并预览 +3. 由 Dify 提交至 LLM 供应商嵌入为向量数据,并存储 +4. 为文档设置元数据 +5. 可以在应用中使用了🎉! 
+ +### 创建数据集 + +在 Dify 主导航栏中点击**数据集**,在该页面你可以看到已有的数据集。你可以点击**创建数据集**进入创建向导: + +* 如果你已经准备好了文件,可以从上传文件开始 +* 如果你还没有准备好文档,可以先创建一个空数据集 + +如果你在创建数据集时选择了使用外部数据源,该数据集的类型不可更改。这是为了防止单一数据集存在多数据源而造成的管理困难。如果你需要使用多个数据源,建议创建多个数据集。 + +#### 编辑良好的数据集描述 + +当一个应用中引用多个数据集时,AI 会根据用户的提问和数据集的描述来决定使用哪个数据集来回答用户的问题。因此,良好的数据集描述能提升 AI 选择数据集的准确率。 + +编写良好的数据集描述的要点是写清楚数据集包含的内容和特点。**数据集的描述建议以这个开头:`仅当你想要回答的问题是关于以下内容时有用:具体描述`**。一个房地产数据集的描述: + +> 仅当你想要回答的问题是关于以下内容时有用: 2010 年到 2020 年的全球房地产市场数据。这些数据包括每个城市的平均房价、房产销售量、房屋类型等信息。此外,该数据集还包括了一些经济指标,如 GDP、失业率等,以及一些社会指标,如人口数量、教育水平等,这些指标可以帮助分析房地产市场的趋势和影响因素。 +> +> 通过这些数据,我们可以了解全球房地产市场的发展趋势,分析各个城市的房价变化,以及了解经济和社会因素对房地产市场的影响。 + +### 上传文档 + +* 选择你要上传的文件,支持批量上传; +* 预览全文; +* 进行分段和清洗; +* 等待 Dify 为你处理这些数据,通常该步骤在 LLM 供应商中需要消耗 Token。 + +### 文本分段与清洗 + +文本数据的分段与清洗是指 Dify 自动将你的数据进行段落分段 & 向量化处理,使得用户的提问(输入)能匹配到相关的文本段落(Q to P),最后输出结果。 + +上传一个数据集的文档,你需要选择文本的**索引方式**来指定数据的匹配方式。这会影响到 AI 在回复问题时的准确度。 + +**高质量模式**下,将调用 OpenAI 的嵌入接口进行处理,以在用户查询时提供更高的准确度。 + +**经济模式**下,会使用离线的向量引擎、关键词索引等方式,降低了准确度但无需花费 Token。 + +**Q\&A 分段模式**下,Q\&A 分段模式功能,与上述普通的「Q to P」(问题匹配文本段落)匹配模式不同,它是采用「Q to Q」(问题匹配问题)匹配工作,在文档经过分段后,经过总结为每一个分段生成 Q\&A 匹配对,当用户提问时,系统会找出与之最相似的问题,然后返回对应的分段作为答案。这种方式更加精确,因为它直接针对用户问题进行匹配,可以更准确地获取用户真正需要的信息。 + +> 问题文本是具有完整语法结构的自然语言,而不是文档检索任务中的一些关键字,所以 Q to Q (问题匹配问题)的模式会令语意和匹配更加清晰,并同时满足一些高频和高相似度问题的提问场景。 + +

Q&A 分段模式下被总结成多个 Q&A 对的文本

+ +

Q to P 与 Q to Q 的索引模式区别

+ +### 对文档进行修改 + +由于技术原因,如果开发者对文档进行以下修改,Dify 会为你创建一个新的文档,而旧的文档会被存档和停用: + +* 调整分段和清洗设置 +* 重新上传文件 + +我们支持对分段与清洗后的文本进行自定义增删改,你可以动态调整自己的分段信息,让你的数据集更加精准。通过点击数据集中 **文档 --> 段落 --> 编辑** 可修改段落内容。通过点击 **文档 --> 段落--> 添加新分段** 可手动新增新的分段内容。 + +

新增段落

+ +### 通过 API 维护数据集 + +TODO + +### 数据集设置 + +在数据集的左侧导航中点击**设置**,你可以改变数据集的以下设置项: + +* 数据集名称,用于识别一个数据集。 +* 数据集描述,能够让 AI 更好的适时取用数据集,如果描述为空则会使用 Dify 的自动索引策略 +* 权限,可选择 只有我 或 所有团队成员,不具有权限的人将无法查阅和编辑数据集。 +* 修改索引模式。注意:索引模式如果从经济升级为高质量会带来额外的 Token 消耗。而从高质量降级为经济则不会消耗 Token。 + +### 集成至应用 + +数据集准备完成后需集成到应用中,当 AI 应用处理用户请求时,会自动将与之关联的数据集内容作为上下文参考。 + +1. 进入**应用 - 提示词编排**页面 +2. 在上下文选项中,选择需要集成的数据集 +3. 保存设置以完成集成 + +### Q\&A + +**Q: 上传 PDF 解析乱码怎么办?** + +A: 如果你的 PDF 在特定格式内容下解析出现乱码的情况,可以考虑将 PDF 转成 Markdown 格式,或目前 Markdown 的准确度会更高,或减少 PDF 内的图片、表格等格式内容。对于 PDF 的使用体验我们正在研究优化方案。 diff --git a/zh_CN/advanced/datasets/sync-from-notion.md b/zh_CN/advanced/datasets/sync-from-notion.md new file mode 100644 index 0000000..9061362 --- /dev/null +++ b/zh_CN/advanced/datasets/sync-from-notion.md @@ -0,0 +1,28 @@ +# 从 Notion 导入数据 + + + +Dify 数据集支持从 Notion 导入,并设置 **同步** 使得数据在 Notion 更新后便自动同步到 Dify。 + +### 授权验证 + +1. 在创建数据集,选择数据源时,点击 **同步自 Notion 内容-- 去绑定,**根据提示完成授权验证**。** +2. 你也可以:进入 **设置 -- 数据来源 -- 添加数据源** 中点击 Notion 来源 **绑定** ,完成授权验证。 + +

绑定 Notion

+ +### 导入 Notion 数据 + +完成验证授权后,进入创建数据集页面,点击 **同步自 Notion 内容 ,**选择需要的授权页面进行导入。 + +### 进行分段和清洗 + +接下来,选择你的**分段设置**和**索引方式**,**保存并处理**。等待 Dify 为你处理这些数据,通常该步骤在 LLM 供应商中需要消耗 Token。Dify 不仅支持普通类型页面导入,并且会将 database 类型下的页面属性进行汇总保存。 + +_**请注意:图片和文件暂不支持导入,表格类数据会被转换为文本展示。**_ + +### 同步 Notion 数据 + +如果您的 Notion 内容有修改,您可以直接在 Dify 数据集 **文档列表页**中点击 **同步** 即可进行数据一键同步,该步骤是需要消耗 Token。 + +

同步 Notion 内容

diff --git a/zh_CN/advanced/model-configuration/README.md b/zh_CN/advanced/model-configuration/README.md new file mode 100644 index 0000000..021ee98 --- /dev/null +++ b/zh_CN/advanced/model-configuration/README.md @@ -0,0 +1,89 @@ +# 模型配置 + +Dify 目前已支持主流的模型供应商,例如 OpenAI 的 GPT 系列。以下是我们目前支持的模型供应商: + +* OpenAI +* Azure OpenAI Service +* Anthropic +* Hugging Face Hub +* Replicate +* 讯飞星火 +* 文心一言 +* 通义千问 +* Minimax +* ChatGLM + +根据技术变化和用户需求,我们将陆续支持更多 LLM 供应商。 + +### 托管模型试用服务 + +我们为 Dify 云服务的用户提供了不同模型的试用额度,请在该额度耗尽前设置你自己的模型供应商,否则将会影响应用的正常使用。 + +* **OpenAI 托管模型试用:**我们提供 500 次调用次数供你试用体验,可用于 GPT3.5-turbo、GPT3.5-turbo-16k、text-davinci-003 模型。 +* **Antropic Claude 托管模型试用:**我们提供 1000 次调用次数供你试用体验,可用于 Claude-instant-1、Claude2 模型。 + +### 模型类型 + +在 Dify 中,我们按模型的使用场景将模型分为以下 3 类: + +1. 系统推理模型。在创建的应用中,用的是该类型的模型。智聊、对话名称生成、下一步问题建议用的也是推理模型。 +2. Embedding 模型。在数据集中,将分段过的文档做 Embedding 用的是该类型的模型。在使用了数据集的应用中,将用户的提问做 Embedding 处理也是用的该类型的模型。 +3. 语音转文字 模型。将对话型应用中,将语音转文字用的是该类型的模型。 + +### 设置默认模型 + +Dify 在需要模型时,会根据使用场景来选择设置过的默认模型。在 `设置 > 模型供应商` 中设置默认模型。 + + +
+ +### 接入模型设置 + +在 Dify 的 `设置 > 模型供应商` 中设置要接入的模型。 + +
+ +模型供应商分为两种: + +1. 自有模型。该类型的模型供应商提供的是自己开发的模型。如 OpenAI,Anthropic 等。 +2. 托管模型。该类型的模型供应商提供的是第三方模型。如 Hugging Face,Replicate 等。 + + +在 Dify 中接入不同类型的模型供应商的方式稍有不同。 + + + +**接入自有模型的模型供应商** + +接入自有模型的供应商后,Dify 会自动接入该供应商下的所有模型。 + + +在 Dify 中设置对应模型供应商的 API key,即可接入该模型供应商。获取模型供应商的 API 地址如下: + +* OpenAI: [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys) +* Anthropic:[https://console.anthropic.com/account/keys](https://console.anthropic.com/account/keys) +* 讯飞星火:[https://www.xfyun.cn/solutions/xinghuoAPI](https://www.xfyun.cn/solutions/xinghuoAPI) +* Minimax:[https://api.minimax.chat/user-center/basic-information/interface-key](https://api.minimax.chat/user-center/basic-information/interface-key) +* 文心一言:[https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application](https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application) +* 通义千问:[https://dashscope.console.aliyun.com/api-key_management?spm=a2c4g.11186623.0.0.3bbc424dxZms9k](https://dashscope.console.aliyun.com/api-key_management?spm=a2c4g.11186623.0.0.3bbc424dxZms9k) +* ChatGLM:该模型供应商并未提供官方的服务。但支持自部署([部署文档](https://github.com/THUDM/ChatGLM2-6B#%E7%8E%AF%E5%A2%83%E5%AE%89%E8%A3%85))。 + +{% hint style="info" %} +Dify 使用了 [PKCS1_OAEP](https://pycryptodome.readthedocs.io/en/latest/src/cipher/oaep.html) 来加密存储用户托管的 API 密钥,每个租户均使用了独立的密钥对进行加密,确保你的 API 密钥不被泄漏。 +{% endhint %} + +**接入托管模型的模型供应商** + +托管类型的供应商上面有很多第三方模型。接入模型需要一个个的添加。具体接入方式如下: + +* [Hugging Face](hugging-face.md)。 +* [Replicate](replicate.md)。 + + + +### 使用模型 + +配置完模型后,就可以在应用中使用这些模型了: + +
+ diff --git a/zh_CN/advanced/model-configuration/hugging-face.md b/zh_CN/advanced/model-configuration/hugging-face.md new file mode 100644 index 0000000..53b7ce3 --- /dev/null +++ b/zh_CN/advanced/model-configuration/hugging-face.md @@ -0,0 +1,65 @@ +# 接入 Hugging Face 上的开源模型 + +Dify 支持 Hugging Face 上类型是 text-generation 和 text2text-generation 类型的模型。 +具体步骤如下: + +1. 你需要有 Hugging Face 账号([注册地址](https://huggingface.co/join))。 +2. 设置 Hugging Face 的 API key([获取地址](https://huggingface.co/settings/tokens))。 +3. 选择模型,进入 [Hugging Face 模型列表页](https://huggingface.co/models?pipeline_tag=text-generation\&sort=trending),筛选类型是 [text-generation](https://huggingface.co/models?pipeline_tag=text-generation\&sort=trending) 和 [text2text-generation](https://huggingface.co/models?pipeline_tag=text2text-generation\&sort=trending) 的模型。 + +
+ + +Dify 支持用两种方式接入 Hugging Face 上的模型: + +1. Hosted Inference API。这种方式是用的 Hugging Face 官方部署的模型。不需要付费。但缺点是,只有少量模型支持这种方式。 +2. Inference Endpiont。这种方式是用 Hugging Face 接入的 AWS 等资源来部署模型,需要付费。 + +### 接入 Hosted Inference API 的模型 + +#### 1 选择模型 + +模型详情页右侧有包含 Hosted inference API 的 区域才支持 Hosted inference API 。如下图所: + +
+ +在模型详情页,可以获得模型的名称。 + + +
+ +#### 2 在 Dify 中使用接入模型 + +在 `设置 > 模型供应商 > Hugging Face` 的 Endpoint Type 选择 Hosted Inference API。如下图所示: + +
+ +API Token 为文章开头设置的 API Key。模型名字为上一步获得的模型名字。 + +### 方式2: Inference Endpiont + +#### 1 选择要部署模型 + +模型详情页右侧的 `Deploy按钮` 下有 Inference Endpionts 选项的模型才支持 Inference Endpiont。如下图所示: + + +
+ +#### 2 部署模型 + +点击模型的部署按钮,选择 Inference Endpiont 选项。如果之前没绑过银行卡的,会需要绑卡。按流程走即可。绑过卡后,会出现下面的界面:按需求修改配置,点击左下角的 Create Endpoint 来创建 Inference Endpiont。 + +
+ +模型部署好后,就可以看到 Endpoint URL。 + +
+ +#### 3 在 Dify 中使用接入模型 + +在 `设置 > 模型供应商 > Hugging Face` 的 Endpoint Type 选择 Inference Endpoints。如下图所示: + +
+ +API Token 为文章开头设置的 API Key。模型名字随便起。Endpoint URL 为 上一步部署模型成功后获得的 Endpoint URL。 + diff --git a/zh_CN/advanced/model-configuration/replicate.md b/zh_CN/advanced/model-configuration/replicate.md new file mode 100644 index 0000000..bc66f53 --- /dev/null +++ b/zh_CN/advanced/model-configuration/replicate.md @@ -0,0 +1,18 @@ +# 接入 Replicate 上的开源模型 + +Dify 支持接入 Replicate 上的 [Language models](https://replicate.com/collections/language-models) 和 [Embedding models](https://replicate.com/collections/embedding-models)。Language models 对应 Dify 的推理模型,Embedding models 对应 Dify 的 Embedding 模型。 + + +具体步骤如下: + +1. 你需要有 Replicate 的账号([注册地址](https://replicate.com/signin?next=/docs))。 +2. 获取 API Key([获取地址](https://replicate.com/account/api-tokens))。 +3. 挑选模型。在 [Language models](https://replicate.com/collections/language-models) 和 [Embedding models](https://replicate.com/collections/embedding-models) 下挑选模型。 +4. 在 Dify 的 `设置 > 模型供应商 > Replicate` 中添加模型。 + +
+ +API key 为第 2 步中设置的 API Key。Model Name 和 Model Version 可以在模型详情页中找到: + + +
diff --git a/zh_CN/advanced/more-integration.md b/zh_CN/advanced/more-integration.md new file mode 100644 index 0000000..c22288e --- /dev/null +++ b/zh_CN/advanced/more-integration.md @@ -0,0 +1,3 @@ +# 更多集成 + +TODO diff --git a/zh_CN/application/creating-an-application.md b/zh_CN/application/creating-an-application.md new file mode 100644 index 0000000..6eedc60 --- /dev/null +++ b/zh_CN/application/creating-an-application.md @@ -0,0 +1,80 @@ +# 创建应用 + +在 Dify 中,一个“应用”是指基于 GPT 等大型语言模型构建的实际场景应用。通过创建应用,您可以将智能 AI 技术应用于特定的需求。它既包含了开发 AI 应用的工程范式,也包含了具体的交付物。 + +简而言之,一个应用为开发者交付了: + +* 封装友好的 LLM API,可由后端或前端应用直接调用,通过 Token 鉴权 +* 开箱即用、美观且托管的 Web App,你可以 WebApp 的模版进行二次开发 +* 一套包含 Prompt Engineering、上下文管理、日志分析和标注的易用界面 + +你可以任选**其中之一**或**全部**,来支撑你的 AI 应用开发。 + +### 应用类型 + +Dify 中提供了两种应用类型:文本生成型与对话型,今后或许会出现更多应用范式(我们应该会及时跟进),Dify 的最终目标是能覆盖 80% 以上的常规 LLM 应用情景。 + +文本生成型与对话型应用的区别见下表: + +
文本生成型对话型
WebApp 界面表单+结果式聊天式
WebAPI 端点completion-messageschat-messages
交互方式一问一答多轮对话
流式结果返回支持支持
上下文保存当次持续
用户输入表单支持支持
数据集与插件支持支持
AI 开场白不支持支持
情景举例翻译、判断、索引聊天或一切
+ +### 创建应用的步骤 + +

创建应用

+ +
+ +第 1 步,以管理员登录 Dify 后,前往主导航应用页 + + + +
+ +
+ +第 2 步,点击“创建新应用” + +此外,我们在创建应用界面中提供了一些模版,你可以在创建应用的弹窗中点击**从模版创建**,这些模版将为你要开发的应用提供启发和参考。 + +
+ +
+ +第 3 步,选择对话型或文本生成型应用,并为它起个名字 + +应用名称今后可以随时修改。 + +
+ +### 从配置文件创建 + +如果你从社区或其它人那里获得了一个模版,你可以点击**从应用配置文件创建**,上传后可加载对方应用中的大部分设置项(但目前不包括数据集)。 + +### 你的应用 + +{% hint style="info" %} +如果你是第一次使用,这里会提示你输入 OpenAI 的 API 密钥。一个可正常使用的 LLM 密钥是使用 Dify 的前提,如果你还没有请前往[申请](https://platform.openai.com/account/api-keys)一个。 +{% endhint %} + +

输入 OpenAI Key

+ +**创建应用**或**选择一个已有应用**后,会来到一个显示应用概况的**应用概览页**。你可以在这里直接访问你的 WebApp 或查看 API 状态,也可以开启或关闭它们。 + +**统计**显示了该应用一段时间内的用量、活跃用户数和 LLM 调用消耗—这使你可以持续改进应用运营的经济性,我们将逐步提供更多有用的可视化能力,请**告诉我们**你想要的。 + +1. 全部消息数(Total Messages),反映 AI 每天的互动总次数,每回答用户一个问题算一条 Message。提示词编排和调试的会话不计入。 +2. 活跃用户数(Active Users),与 AI 有效互动,即有一问一答以上的唯一用户数。提示词编排和调试的会话不计入。 +3. 平均会话互动数(Average Session Interactions),反映每个会话用户的持续沟通次数,如果用户与 AI 问答了 10 轮,即为 10。该指标反映了用户粘性。仅在对话型应用提供。 +4. 用户满意度(User Satisfaction Rate),每 1000 条消息的点赞数。反应了用户对回答十分满意的比例。 +5. 平均响应时间(Average Response Time),衡量 AI 应用处理和回复用户请求所花费的平均时间,单位为毫秒,反映性能和用户体验。仅在文本型应用提供。 +6. 费用消耗(Token Usage),反映每日该应用请求语言模型的 Tokens 花费,用于成本控制。 + +### 接下来 + +* 试试你的 **WebApp** +* 逛一逛左侧的配置、开发和 Logs 页 +* 试着参考**案例**配置一个应用 +* 如果你具备开发前端应用的能力,请查阅 **API 文档** + + + diff --git a/zh_CN/application/developing-with-apis.md b/zh_CN/application/developing-with-apis.md new file mode 100644 index 0000000..4f8c39f --- /dev/null +++ b/zh_CN/application/developing-with-apis.md @@ -0,0 +1,74 @@ +# 基于 APIs 开发 + +Dify 基于“**后端即服务**”理念为所有应用提供了 API,为 AI 应用开发者带来了诸多便利。通过这一理念,开发者可以直接在前端应用中获取大型语言模型的强大能力,而无需关注复杂的后端架构和部署过程。 + +### 使用 Dify API 的好处 + +* 让前端应用直接安全地调用 LLM 能力,省去后端服务的开发过程 +* 在可视化的界面中设计应用,并在所有客户端中实时生效 +* 对 LLM 供应商的基础能力进行了良好封装 +* 随时切换 LLM 供应商,并对 LLM 的密钥进行集中管理 +* 在可视化的界面中运营你的应用,例如分析日志、标注及观察用户活跃 +* 持续为应用提供更多工具能力、插件能力和数据集 + +### 如何使用 + +选择一个应用,在应用(Apps)左侧导航中可以找到**访问 API(API Access)**。在该页面中你可以查看 Dify 提供的 API 文档,并管理可访问 API 的凭据。 + +

访问 API 文档

+ +例如你是一个咨询公司的开发部分,你可以基于公司的私有数据库提供 AI 能力给终端用户或开发者,但开发者无法掌握你的数据和 AI 逻辑设计,从而使得服务可以安全、可持续的交付并满足商业目的。 + +{% hint style="warning" %} +在最佳实践中,API 密钥应通过后端调用,而不是直接以明文暴露在前端代码或请求中,这样可以防止你的应用被滥用或攻击。 +{% endhint %} + +你可以为一个应用**创建多个访问凭据**,以实现交付给不同的用户或开发者。这意味着 API 的使用者虽然使用了应用开发者提供的 AI 能力,但背后的 Promp 工程、数据集和工具能力是经封装的。 + +### 文本生成型应用 + +可用于生成高质量文本的应用,例如生成文章、摘要、翻译等,通过调用 completion-messages 接口,发送用户输入得到生成文本结果。用于生成文本的模型参数和提示词模版取决于开发者在 Dify 提示词编排页的设置。 + +你可以在**应用 -> 访问 API** 中找到该应用的 API 文档与范例请求。 + +例如,创建文本补全信息的 API 的调用示例: + +``` +curl --location --request POST 'https://api.dify.dev/v1/completion-messages' \ +--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "inputs": {}, + "query": "Hi", + "response_mode": "streaming", + "user": "abc-123" +}' + +``` + +### 对话型应用 + +可用于大部分场景的对话型应用,采用一问一答模式与用户持续对话。要开始一个对话请调用 chat-messages 接口,通过继续传入返回的 conversation_id 可持续保持该会话。 + +你可以在**应用 -> 访问 API** 中找到该应用的 API 文档与范例请求。 + +例如,发送对话信息的 API的调用示例: + +``` +curl --location --request POST 'https://api.dify.dev/v1/chat-messages' \ +--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "inputs": {}, + "query": "eh", + "response_mode": "streaming", + "conversation_id": "1c7e55fb-1ba2-4e10-81b5-30addcea2276" + "user": "abc-123" +}' + +``` + + + + + diff --git a/zh_CN/application/launch-your-webapp-quickly.md b/zh_CN/application/launch-your-webapp-quickly.md new file mode 100644 index 0000000..c0e7e5f --- /dev/null +++ b/zh_CN/application/launch-your-webapp-quickly.md @@ -0,0 +1,46 @@ +# 快速发布 AI 站点 + +使用 Dify 创建 AI 应用的一个好处在于,你可以在几分钟内就发布一个可供用户使用的 Web 应用,该应用将根据你的 Prompt 编排工作。 + +* 如果你使用的是自部署的开源版,该应用将运行在你的服务器上 +* 如果你使用的是云服务,该应用将托管至 Dify.app + +### 发布 AI 站点 + +在应用概览页中,你可以找到关于AI 站点(WebApp)的卡片。只需开启 WebApp 的访问,就可以得到一个可分享可分享给用户的链接了。 + +

分享应用

+ +我们对以下两种应用均提供了一个漂亮的 WebApp 界面: + +* 文本生成型(前往预览) +* 对话型(前往预览) + +### 设置你的 AI 站点 + +点击 WebApp 卡片上的**设置**按钮,可以为 AI 站点配置一些选项。它们都会被最终用户所见: + +* 图标 +* 名称 +* 应用描述 +* 界面语言 +* 版权信息 +* 隐私政策链接 + +### 嵌入你的 AI 站点 + +Dify 支持将你的 AI 应用嵌入到你的业务网站中,你可以使用该能力在几分钟内制作具有业务数据的官网 AI 客服、业务知识问答等应用。点击 WebApp 卡片上的嵌入按钮,复制嵌入代码,粘贴到你网站的目标位置。 + +* iframe 标签方式 + + 将 iframe 代码复制到你网站用于显示 AI 应用的标签中,如 `
`、`
` 等标签。 + +* script 标签方式 + + 将 script 代码复制到你网站 `` 或 `` 标签中。 + +
+ +例如,将 script 代码粘贴到官网的 `` 处,你将得到一个官网 AI 机器人: + +
diff --git a/zh_CN/application/logs.md b/zh_CN/application/logs.md new file mode 100644 index 0000000..c37b382 --- /dev/null +++ b/zh_CN/application/logs.md @@ -0,0 +1,34 @@ +# 日志与标注 + +{% hint style="warning" %} +请确保您的应用在收集用户数据时遵守当地法规。通常的做法是发布隐私政策并征得用户的同意。 +{% endhint %} + +**日志(Logs)**功能用以观察与标记 Dify 应用的工作表现,Dify 为应用的全部交互过程记录了日志,无论你是通过 WebApp 或 API 调用的,如果你是 Prompt 工程师或 LLM 运营人员,它将为你提供可视化的 LLM 应用运营体验。 + +### 使用日志控制台 + +你可以在应用的左侧导航找到**日志(Logs)**,该页面通常展示了: + +* 所选时间内用户与用户的交互记录 +* 用户输入与 AI 输出的结果,对于对话型应用通常是一系列的消息流 +* 用户、运营人员的评价,以及运营人员的改进标注 + +注意,如果团队中的多位协作者对同一条日志进行标注,最后标注的记录将覆盖此前的标注。 + +### 改进标注 + +{% hint style="info" %} +这些标记将在 Dify 的后续版本中供模型微调使用,以提升模型的准确性与回复风格,当前预览版仅支持标记。 +{% endhint %} + +

标记日志以改进

+ +点击一条日志会在界面右侧打开日志详情面板,在该面板中运营人员可以对一次互动进行标注: + +* 对表现较佳的消息点赞 +* 对表现不佳的消息点踩 +* 对改进的结果标记改进回复,这代表了你期望 AI 回复的文本 + +注意,如果团队中的多位管理员对同一条日志进行标注,最后标注的记录将覆盖此前的标注。 + diff --git a/zh_CN/application/prompt-engineering/README.md b/zh_CN/application/prompt-engineering/README.md new file mode 100644 index 0000000..3e5df89 --- /dev/null +++ b/zh_CN/application/prompt-engineering/README.md @@ -0,0 +1,31 @@ +--- +description: 掌握如何使用 Dify 编排应用和实践 Prompt Engineering,通过内置的两种应用类型,搭建出高价值的 AI 应用。 +--- + +# 设计提示词&编排应用 + +Dify 的核心理念是可声明式的定义 AI 应用,包括 Prompt、上下文和插件等等的一切都可以通过一个 YAML 文件描述(这也是为什么称之为 Dify )。最终呈现的是单一 API 或开箱即用的 WebApp。 + +与此同时,Dify 提供了一个易用的 Prompt 编排界面,开发者能以 Prompt 为基础所见即所得的编排出各种应用特性。听上去是不是很简单? + +无论简单或是复杂的 AI 应用,好的 Prompt 可以有效提高模型输出的质量,降低错误率,并满足特定场景的需求。Dify 已提供对话型和文本生成型两种常见的应用形态,这个章节会带你以可视化的方式完成 AI 应用的编排, + +### 应用编排的步骤 + +1. 确定应用场景和功能需求 +2. 设计并测试 Prompts 与模型参数 +3. 编排 Prompts 与用户输入 +4. 发布应用 +5. 观测并持续迭代 + +### 了解应用类型的区别 + +Dify 中的文本生成型应用与对话型应用在 Prompt 编排上略有差异,对话型应用需结合“对话生命周期”来满足更复杂的用户情景和上下文管理需求。 + +Prompt Engineering 已发展为一个潜力巨大,值得持续探索的学科。请继续往下阅读,学习两种类型应用的编排指南。 + +### 扩展阅读 + +1. [Learn Prompting](https://learnprompting.org/zh-Hans/) +2. [ChatGPT Prompt Engineering for Developers](https://www.deeplearning.ai/short-courses/chatgpt-prompt-engineering-for-developers/) +3. [Awesome ChatGPT Prompts](https://github.com/f/awesome-chatgpt-prompts) diff --git a/zh_CN/application/prompt-engineering/conversation-application.md b/zh_CN/application/prompt-engineering/conversation-application.md new file mode 100644 index 0000000..f1cff5f --- /dev/null +++ b/zh_CN/application/prompt-engineering/conversation-application.md @@ -0,0 +1,83 @@ +# 对话型应用 + +对话型应用采用一问一答模式与用户持续对话。 + +### 适用场景 + +对话型应用可以用在客户服务、在线教育、医疗保健、金融服务等领域。这些应用可以帮助组织提高工作效率、减少人工成本和提供更好的用户体验。 + +### 如何编排 + +对话型应用的编排支持:对话前提示词,变量,上下文,开场白和下一步问题建议。 + +下面边以做一个 **面试官** 的应用为例来介绍编排对话型应用。 + +#### 第 1 步 创建应用 + +在首页点击 “创建应用” 按钮创建应用。填上应用名称,应用类型选择**对话型应用**。 + +

创建应用

+ +#### 第 2 步 编排应用 + +应用成功后会自动跳转到应用概览页。点击左侧菜单:**提示词编排** 来编排应用。 + +

编排应用

+ +**2.1 填写提示词** + +提示词用于对 AI 的回复做出一系列指令和约束。可插入表单变量,例如 `{{input}}`。提示词中的变量的值会替换成用户填写的值。 + +我们在这里填写的提示词是: + +> 我想让你担任\{{jobName\}}面试官。我将成为候选人,您将向我询问\{{jobName\}}开发工程师职位的面试问题。我希望你只作为面试官回答。不要一次写出所有的问题。我希望你只对我进行采访。问我问题,等待我的回答。不要写解释。像面试官一样一个一个问我,等我回答。 +> +> 当我回准备好了后,开始提问。 + +![](<../../.gitbook/assets/image (80).png>) + +为了更好的体验,我们加上对话开场白:`你好,{{name}}。我是你的面试官,Bob。你准备好了吗?` + +添加开场白的方法是,点击左上角的 “添加功能” 按钮,打开 “对话开场白” 的功能: + +
+ +然后编辑开场白: + +![](<../../.gitbook/assets/image (28).png>) + +#### 2.2 添加上下文 + +如果应用想基于私有的上下文对话来生成内容。可以用我们[数据集](../../advanced/datasets/)功能。在上下文中点 “添加” 按钮来添加数据集。 + +![](<../../.gitbook/assets/image (108).png>) + +**2.3 调试** + +我们在右侧填写 用户输入,输入内容进行调试。 + +![](<../../.gitbook/assets/image (67).png>) + + + +如果结果不理想,可以调整提示词和模型参数。点右上角点 模型名称 来设置模型的参数: + +![](<../../.gitbook/assets/image (76).png>) + +我们支持模型 gpt-4。 + + + +**2.4 发布** + +调试好应用后,点击右上角的 **“发布”** 按钮来保存当前的设置。 + +### 分享应用 + +在概览页可以找到应用的分享地址。点 “预览按钮” 预览分享出去的应用。点 “分享” 按钮获得分享的链接地址。点 “设置” 按钮设置分享出去的应用信息。 + +
+ + + +如果想定制化分享出去的应用,可以 Fork 我们的开源的 [WebApp 的模版](https://github.com/langgenius/webapp-conversation)。基于模版改成符合你的情景与风格需求的应用。 diff --git a/zh_CN/application/prompt-engineering/text-generation-application.md b/zh_CN/application/prompt-engineering/text-generation-application.md new file mode 100644 index 0000000..03f3eac --- /dev/null +++ b/zh_CN/application/prompt-engineering/text-generation-application.md @@ -0,0 +1,76 @@ +# 文本生成型应用 + +文本生成类应用是一种能够根据用户提供的提示,自动生成高质量文本的应用。它可以生成各种类型的文本,例如文章摘要、翻译等。 + +### 适用场景 + +文本生成类应用适用于需要大量文本创作的场景,例如新闻媒体、广告、SEO、市场营销等。它可以为这些行业提供高效、快速的文本生成服务,降低人力成本并提高生产效率。 + +### 如何编排 + +文本生成应用的编排支持:前缀前提示词,变量,上下文和生成更多类似的内容。 + +这边以做一个翻译应用为例来介绍编排文本生成型应用。 + +#### 第 1 步 创建应用 + +在首页点击 “创建应用” 按钮创建应用。填上应用名称,应用类型选择**文本生成应用**。 + +

创建应用

+ +#### 第 2 步 编排应用 + +应用成功后会自动跳转到应用概览页。点击左侧菜单:**提示词编排** 来编排应用。 + +
+ +**2.1 填写前缀提示词** + +提示词用于对 AI 的回复做出一系列指令和约束。可插入表单变量,例如 `{{input}}`。提示词中的变量的值会替换成用户填写的值。 + +我们在这里填写的提示词是:`将内容翻译成:{{language}}。内容如下:` + +![](<../../.gitbook/assets/image (65).png>) + +#### 2.2 添加上下文 + +如果应用想基于私有的上下文对话来生成内容。可以用我们[数据集](../../advanced/datasets/)功能。在上下文中点 “添加” 按钮来添加数据集。 + +![](<../../.gitbook/assets/image (88).png>) + +#### 2.3 添加功能:生成更多类似的 + +生成更多类似可以一次生成多条文本,可在此基础上编辑并继续生成。点击左上角的 “添加功能” 来打开该功能。 + +

打开更多类似的功能

+ +**2.4 调试** + +我们在右侧 输入变量 和 查询内容 进行调试。点 **“运行”** 按钮 查看运行的结果。 + +![](<../../.gitbook/assets/image (22).png>) + +如果结果不理想,可以调整提示词和模型参数。点右上角点 模型名称 来设置模型的参数: + +
+ +

调整模型参数

+ +
+ +**2.5 发布** + +调试好应用后,点击右上角的 **“发布”** 按钮来保存当前的设置。 + +### 分享应用 + +在概览页可以找到应用的分享地址。点 “预览按钮” 预览分享出去的应用。点 “分享” 按钮获得分享的链接地址。点 “设置” 按钮设置分享出去的应用信息。 + +
+ +如果想定制化分享出去的应用,可以 Fork 我们的开源的[ WebApp 的模版](https://github.com/langgenius/webapp-text-generator)。基于模版改成符合你的情景与风格需求的应用。 + + + + + diff --git a/zh_CN/community/data-security.md b/zh_CN/community/data-security.md new file mode 100644 index 0000000..b886ece --- /dev/null +++ b/zh_CN/community/data-security.md @@ -0,0 +1,13 @@ +# 数据安全 + +感谢您对 Dify 产品的关注,Dify 非常重视您的数据安全。请参阅我们的[【隐私协议】](https://docs.dify.ai/v/zh-hans/user-agreement/privacy-policy) 。 + +可以透露的是:Dify 的云服务位于美国 Azure,仅极少数获得授权的人员经审批后才可访问用户的数据。另外,我们的代码均在 GitHub 开源,如果您对云服务有安全担忧可以使用自部署版本。 + +由于我们的产品还处于早期阶段,或许我们仍有一些工作做得不足,但我们有计划获得 SOC2 和 ISO27001 认证。 + +如果您有商业化方面的疑问,请联系 business@dify.ai + +在 Dify 的自部署版本中,仅有一处调用 Dify 服务器,即检查当前版本更新 API 的功能。且必须由管理员在后台触发。其它没有任何使用到远程服务器的技术,因此您可以安全使用。 + +如果您仍有疑虑,可以通过设置防火墙等方式对数据进行保护。 diff --git a/zh_CN/community/open-source.md b/zh_CN/community/open-source.md new file mode 100644 index 0000000..78c3658 --- /dev/null +++ b/zh_CN/community/open-source.md @@ -0,0 +1,17 @@ +# 开源协议 + +## Dify 开源许可证 + +Dify 项目在 Apache License 2.0 许可下开源,同时包含以下附加条件: + +1. Dify 允许被用于商业化,例如作为其他应用的“后端即服务”使用,或者作为应用开发平台提供给企业。然而,当满足以下条件时,必须联系生产者获得商业许可: + * 多租户 SaaS 服务:除非获得 Dify 的明确书面授权,否则不得使用 Dify.AI 的源码来运营与 Dify.AI 服务版类似的多租户 SaaS 服务。 + * LOGO 及版权信息:在使用 Dify 的过程中,不得移除或修改 Dify 控制台内的 LOGO 或版权信息。 + +请通过电子邮件 [business@dify.ai](mailto:business@dify.ai) 联系我们咨询许可事宜。 + +2. 作为贡献者,你应同意你贡献的代码: + * 生产者有权将开源协议调整为更严格或更宽松的形式。 + * 可用于商业目的,例如 Dify 的云业务。 + +除此之外,所有其他权利和限制均遵循 Apache License 2.0。如果你需要更多详细信息,可以参考 Apache License 2.0 的完整版本。本产品的交互设计受到外观专利保护。© 2023 LangGenius, Inc. 
diff --git a/zh_CN/community/support.md b/zh_CN/community/support.md new file mode 100644 index 0000000..09eb2f9 --- /dev/null +++ b/zh_CN/community/support.md @@ -0,0 +1,19 @@ +# 寻求支持 + +如果你阅读这套文档时,仍然对产品使用存在疑问和建议,可尝试以下方式寻求支持。我们的团队与社区会竭尽所能的为你提供帮助。 + +### 社区支持 + +{% hint style="info" %} +请不要将涉及你的 Dify 账户信息与其它密钥信息发至社区,我们的支持人员也不会索要您的账户信息。 +{% endhint %} + +* 在 [Github](https://github.com/langgenius/dify) 上提交 Issue +* 加入 [Discord ](https://discord.gg/FngNHpbcY7)社群 +* 发邮件至 [support@dify.ai](mailto:support@dify.ai) + +### 联系我们 + +适用于除了寻求产品支持以外的其他事宜。 + +* 发邮件至 [hello@dify.ai](mailto:hello@dify.ai) diff --git a/zh_CN/explore/chat.md b/zh_CN/explore/chat.md new file mode 100644 index 0000000..eab3e7e --- /dev/null +++ b/zh_CN/explore/chat.md @@ -0,0 +1,62 @@ +# 智聊 + +智聊是用于探索 Dify 能力边界的对话型应用。 + +在我们和自然语言大模型对话时,经常会遇到回答内容过期或者失效的情况,这是由于大模型的训练数据较老以及无联网能力导致的,智聊在大模型的基础上,利用代理(Agent) 的能力以及一些工具为大模型赋予了联网实时查询的能力。 + +
+ +智聊支持使用插件和数据集。 + +### 使用插件 + +大语言模型不能联网和调用外部工具。但这不能满足实际的使用场景,比如: + +* 我们想知道今天的天气时,需要联网。 +* 我们想总结某个网页的内容时,需要使用外部工具:读取网页内容。 + +使用代理模式,可以解决上面的问题:当大语言模型没法解答用户的问题时,会尝试使用现有的插件来解答问题。 + +{% hint style="info" %} +在 Dify 中,对于不同的模型,我们用了不同的代理策略。OpenAI 的模型使用的代理策略是 **GPT function call。**其他模型使用是 **`ReACT`**`。目前测试的体验是`**GPT function call** 的效果更好`。想了解更多,可以阅读下面的链接:` + +* [Function calling and other API updates](https://openai.com/blog/function-calling-and-other-api-updates) +* [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629) + + +{% endhint %} + +目前我们支持如下插件: + +* 谷歌搜索。该插件会搜索谷歌找答案。 +* 解析链接。该插件会读取链接的网页内容。 +* 维基百科。该插件会搜索维基百科找答案。 + +我们可以在对话开始前选择本次对话需要的插件。 + +
+ +如果使用谷歌搜索插件,需要配置 SerpAPI key。 + +
+ +配置的入口: + +
+ +### 使用数据集 + +智聊支持数据集。选择了数据集后,用户问的问题和数据集内容有关,模型会从数据集中找答案。 + +我们可以在对话开始前选择本次对话需要的数据集。 + +
+ +### 思考的过程 + +思考的过程指模型使用插件和数据集的过程。我们可以在每个回答中看到思考的过程。 + +
+ + + diff --git a/zh_CN/getting-started/cloud.md b/zh_CN/getting-started/cloud.md new file mode 100644 index 0000000..eac36ab --- /dev/null +++ b/zh_CN/getting-started/cloud.md @@ -0,0 +1,14 @@ +# 使用云服务 + +{% hint style="info" %} +**提示:** Dify 目前正在 Beta 测试阶段,如文档与产品存在不一致,请以产品实际体验为准。 +{% endhint %} + +Dify 为所有人提供了[云服务](http://cloud.dify.ai),你无需自己部署即可使用 Dify 的完整功能。要使用 Dify 云服务,你需要有一个 GitHub 或 Google 账号。 + +1. 登录 [Dify 云服务](https://cloud.dify.ai),创建一个或加入已有的 Workspace +2. 配置你的模型供应商,或使用我们提供的托管模型供应商 +3. 可以[创建应用](../application/creating-an-application.md)了! + +目前我们没有收费计划,如果你喜欢请将这款 LLMOps 产品介绍给身边的朋友😄。 + diff --git a/zh_CN/getting-started/faq/README.md b/zh_CN/getting-started/faq/README.md new file mode 100644 index 0000000..4eef2d4 --- /dev/null +++ b/zh_CN/getting-started/faq/README.md @@ -0,0 +1,10 @@ +# 常见问题 + +[本地部署相关常见问题](https://docs.dify.ai/v/zh-hans/getting-started/faq/install-faq) + +[LLM 配置与使用相关常见问题](https://docs.dify.ai/v/zh-hans/getting-started/faq/llms-use-faq) + +[API 使用常见问题](api-use-faq.md) + + + diff --git a/zh_CN/getting-started/faq/api-use-faq.md b/zh_CN/getting-started/faq/api-use-faq.md new file mode 100644 index 0000000..04428b9 --- /dev/null +++ b/zh_CN/getting-started/faq/api-use-faq.md @@ -0,0 +1,5 @@ +# API 使用 + +## 什么是Bearer Token? + +Bearer 身份验证(也称为令牌认证)是一种涉及名为 bearer 令牌的安全令牌的HTTP认证方案。"Bearer身份验证"的名称可以理解为"给予此令牌的持有者访问权限"。bearer令牌是一串加密的字符串,通常由服务器在响应登录请求时生成。当客户端向受保护资源发出请求时,必须在 Authorization 头中发送此令牌: `Authorization: Bearer ` Bearer 身份验证方案最初是作为 OAuth 2.0 的一部分在RFC 6750 中创建的,但有时也单独使用。与基本认证类似,Bearer认证只应通过 HTTPS(SSL)使用。 diff --git a/zh_CN/getting-started/faq/install-faq.md b/zh_CN/getting-started/faq/install-faq.md new file mode 100644 index 0000000..1f22b5c --- /dev/null +++ b/zh_CN/getting-started/faq/install-faq.md @@ -0,0 +1,133 @@ +# 本地部署相关 + + + +### 1. 本地部署初始化后,密码错误如何重置? + +若使用 docker compose 方式部署,可执行以下命令进行重置 + +``` +docker exec -it docker-api-1 flask reset-password +``` + +输入账户 email 以及两次新密码即可。 + +### 2. 本地部署日志中报 File not found 错误,如何解决? 
+ +``` +ERROR:root:Unknown Error in completion +Traceback (most recent call last): + File "/www/wwwroot/dify/dify/api/libs/rsa.py", line 45, in decrypt + private_key = storage.load(filepath) + File "/www/wwwroot/dify/dify/api/extensions/ext_storage.py", line 65, in load + raise FileNotFoundError("File not found") +FileNotFoundError: File not found +``` + +该错误可能是由于更换了部署方式,或者 `api/storage/privkeys` 删除导致,这个文件是用来加密大模型密钥的,因此丢失后不可逆。可以使用如下命令进行重置加密公私钥: + +* Docker compose 部署 + + ``` + docker exec -it docker-api-1 flask reset-encrypt-key-pair + ``` +* 源代码启动 + + 进入 api 目录 + + ``` + flask reset-encrypt-key-pair + ``` + + 按照提示进行重置。 + +### **3. 安装时后无法登录,登录成功,但后续接口均提示 401?** + +这可能是由于切换了域名/网址,导致前端和服务端跨域。跨域和身份会涉及到两方面的配置: + +1. CORS 跨域配置 + 1. `CONSOLE_CORS_ALLOW_ORIGINS` + + 控制台 CORS 跨域策略,默认为 `*`,即所有域名均可访问。 + 2. `WEB_API_CORS_ALLOW_ORIGINS` + + WebAPP CORS 跨域策略,默认为 `*`,即所有域名均可访问。 +2. COOKIE 策略配置 + + Cookie 策略分为三个配置 `HttpOnly`、`SameSite` 和 `Secure`。 + + 1. `HttpOnly`:默认为 true,正常不需要修改,用于防止 XSS 攻击,即 JS 无法获取 Cookie 内容,只能在 Http 请求中带上。 + 2. `SameSite`:分为三档,Strict、Lax 和 None,而由于 Dify 需要在 Github、Google 外部域名授权回调时能够从 Cookie 获取身份信息,因此只能在 Lax 和 None 之间选择,其中 None 完全可以跨域访问。 + 3. `Secure`:该参数限制是否服务端接口必须在 https 时下才可将 Cookie 存到本地,该参数在跨域时必须为 true(本地 localhost / 127.0.0.1 不同端口除外),否则浏览器不予通过。 + +#### 推荐配置 + +根据上述配置说明,我们推荐这三种场景下的配置: + +* 本地调试(默认策略) + + 开发模式同域策略。 支持 HTTP/HTTPS 协议,但需要保证前端页面和接口同域。 + +
WEB_API_CORS_ALLOW_ORIGINS: '*'
+    CONSOLE_CORS_ALLOW_ORIGINS: '*'
+    COOKIE_HTTPONLY: 'true'
+    COOKIE_SAMESITE: 'Lax'
+    COOKIE_SECURE: 'false'
+    
+* 跨域策略(请勿应在生产) + + 服务端与 web 客户端跨域,服务端必须为 https。 由于 SameSite=None 必须配合 Secure=true,因此服务端必须为 `https` 协议才能实现跨域访问,可以用在服务端在远程并且提供 `https` 协议支持,或者本地单独启动服务端和前端项目(localhost,但不同端口,实测可用,虽然提示 warning)。 + + ``` + WEB_API_CORS_ALLOW_ORIGINS: 'https://your-domain-for-web-app' + CONSOLE_CORS_ALLOW_ORIGINS: 'https://your-domain-for-console' + COOKIE_HTTPONLY: 'true' + COOKIE_SAMESITE: 'None' + COOKIE_SECURE: 'true' + ``` +* 生产策略 + + 严格模式。 由于部分第三方集成需要支持回调并带着 cookie 信息,因此不能使用最高的 Strict 策略,因此需要严格限制 CORS 域名,以及设置 cookie 策略为 SameSite=Lax, Secure=true。 + + ``` + WEB_API_CORS_ALLOW_ORIGINS: 'https://your-domain-for-web-app' + CONSOLE_CORS_ALLOW_ORIGINS: 'https://your-domain-for-console' + COOKIE_HTTPONLY: 'true' + COOKIE_SAMESITE: 'Lax' + COOKIE_SECURE: 'true' + ``` + +#### 不可用场景 + +在前后端跨域,且服务端为 http 协议时,无任何 Cookie 策略可以支持该场景,请调整后端为 HTTPS 协议或者设置为同域。 + +### **4. 启动后页面一直在 loading,查看请求提示 CORS 错误?** + +这可能是由于切换了域名/网址,导致前端和服务端跨域,请将 `docker-compose.yml` 中所有的以下配置项改为新的域名: + +`CONSOLE_API_URL:` 控制台 API 的后端 URL。 +`CONSOLE_WEB_URL:` 控制台网页的前端 URL。 +`SERVICE_API_URL:` 服务 API 的 URL。 +`APP_API_URL:` WebApp API 的后端 URL。 +`APP_WEB_URL:` WebApp 的 URL。 + +更多信息,请查看:[环境变量](../install-self-hosted/environments.md) + +### 5. 部署后如何升级版本? + +如果你是通过镜像启动,请重新拉取最新镜像完成升级。 如果你是通过源码启动,请拉取最新代码,然后启动,完成升级。 + +### 6. 使用 Notion 导入时如何配置环境变量 + +**问: Notion 的集成配置地址是什么?** + +答: [https://www.notion.so/my-integrations](https://www.notion.so/my-integrations) + +**问: 需要配置哪些环境变量?** + +答: 进行私有化部署时,请设置以下配置: + +1. **`NOTION_INTEGRATION_TYPE`** :该值应配置为(**public/internal**)。由于 Notion 的 Oauth 重定向地址仅支持 https,如果在本地部署,请使用 Notion 的内部集成。 +2. **`NOTION_CLIENT_SECRET`** : Notion OAuth 客户端密钥(用于公共集成类型)。 +3. **`NOTION_CLIENT_ID`** : OAuth 客户端ID(用于公共集成类型)。 +4. 
**`NOTION_INTERNAL_SECRET`** : Notion 内部集成密钥,如果 `NOTION_INTEGRATION_TYPE` 的值为 **internal**,则需要配置此变量。 diff --git a/zh_CN/getting-started/faq/llms-use-faq.md b/zh_CN/getting-started/faq/llms-use-faq.md new file mode 100644 index 0000000..d286228 --- /dev/null +++ b/zh_CN/getting-started/faq/llms-use-faq.md @@ -0,0 +1,52 @@ +# LLM 配置与使用 + +### 1. 如何在国内环境中使用 OpenAI 代理服务器进行访问? + +可以在 `docker-compose.yaml` 中的 api 和 worker 服务都加一条 environments 环境变量记录。或者源代码启动的,可以在 api/.env 中增加下方的环境变量。 + +```Bash +OPENAI_API_BASE: 'https:///v1' +``` + +### **2. 如何选择基础模型?** + +* gpt-3.5-turbo + gpt-3.5-turbo 是 gpt-3 模型系列的升级版,它比 gpt-3 更强大,可以处理更复杂的任务。 它在理解长文本和跨文档推理方面有重大提高。 gpt-3.5 turbo 可以产生更连贯和更具说服力的文本。它在摘要、翻译和创意写作方面也有很大提高。 擅长:**长文本理解、跨文档推理、摘要、翻译、创意写作** + +* gpt-4 + gpt-4 是最新最强大的 Transformer 语言模型。它拥有预训练的参数量增至约 200 亿,这使其在所有语言任务上都达到了最高水平,特别是在需要深入理解和生成长、复杂响应的任务上。gpt-4 可以处理人类语言的所有方面,包括理解抽象概念和跨页面的推理。gpt-4 是第一个真正的通用语言理解系统,它可以胜任人工智能领域内的任何自然语言处理任务。擅长: **所有 NLP 任务,语言理解,长文本生成,跨文档推理,抽象概念理解**具体可参考:https://platform.openai.com/docs/models/overview + + +### **3. 为什么建议 max_tokens 设置小一点?** + +因为在自然语言处理中,较长的文本输出通常需要更长的计算时间和更多的计算资源。因此,限制输出文本的长度可以在一定程度上降低计算成本和计算时间。例如设置:max_tokens=500 ,表示最多只考虑输出文本的前 500 个 token,而超过这个长度的部分将会被丢弃。这样做的目的是保证输出文本的长度不会超过 LLM 的接受范围,同时还可以充分利用计算资源,提高模型的运行效率。另一方面,更多的情况是,限制 max_tokens 能够增加 prompt 的长度,如 gpt-3.5-turbo 的限制为 4097 tokens,如果设置 max_tokens=4000,那么 prompt 就只剩下 97 tokens 可用,如果超过就会报错。 + + +### **4. 数据集长文本如何切分比较合理?** + +在一些自然语言处理应用中,通常会将文本按照段落或者句子进行切分,以便更好地处理和理解文本中的语义和结构信息。最小切分单位取决于具体的任务和技术实现。例如: + +* 对于文本分类任务,通常将文本按照句子或者段落进行切分 +* 对于机器翻译任务,则需要将整个句子或者段落作为切分单位。 + +最后,还需要进行实验和评估来确定最合适的 embedding 技术和切分单位。可以在测试集上 / 命中测试比较不同技术和切分单位的性能表现,并选择最优的方案。 + + +### 5. 我们在获取数据集分段时用的什么距离函数? 
+ +我们使用[余弦相似度](https://en.wikipedia.org/wiki/Cosine_similarity)。距离函数的选择通常无关紧要。OpenAI 嵌入被归一化为长度 1,这意味着: + +仅使用点积可以稍微更快地计算余弦相似度 + +余弦相似度和欧几里德距离将导致相同的排名 + +* > 如果将归一化后的嵌入向量用于计算余弦相似度或欧几里德距离,并基于这些相似性度量对向量进行排序,得到的排序结果将是相同的。也就是说,无论是使用余弦相似度还是欧几里德距离来衡量向量之间的相似性,排序后的结果将是一致的。这是因为在归一化后,向量的长度不再影响它们之间的相对关系,只有方向信息被保留下来。因此,使用归一化的向量进行相似性度量时,不同的度量方法将得到相同的排序结果。在向量归一化后,将所有向量的长度缩放到 1,这意味着它们都处于单位长度上。单位向量只描述了方向而没有大小,因为它们的长度恒为 1。_具体原理可问 ChatGPT._ + +当嵌入向量被归一化为长度 1 后,计算两个向量之间的余弦相似度可以简化为它们的点积。因为归一化后的向量长度都为 1,点积的结果就等同于余弦相似度的结果。由于点积运算相对于其他相似度度量(如欧几里德距离)的计算速度更快,因此使用归一化的向量进行点积计算可以稍微提高计算效率。 + +### 6. 如何免费申领讯飞星火、MiniMax 模型的体验额度? + +我们联合大模型厂商向中国用户提供一定的 token 体验额度。通过 Dify **设置 --> 模型供应商 --> 显示更多模型供应商。**在讯飞星火或 MiniMax 图标处点击【免费获取】,如果你在英文界面看不到领取入口,请将产品语言切换成为中文: + +* **讯飞星火:免费领取 300 万 token**,需要从 Dify 的入口进入,完成讯飞星火开放平台的注册(仅限未注册过讯飞星火的手机号),返回 Dify 静候 5 分钟,刷新页面即可在 Dify 页面体现可用额度。 +* **MiniMax:免费领取 100 万 token**,只需点击【免费领取】即可到账额度,无需手动注册流程,不限制是否注册过 MiniMax 账号。 diff --git a/zh_CN/getting-started/install-self-hosted/README.md b/zh_CN/getting-started/install-self-hosted/README.md new file mode 100644 index 0000000..eda0557 --- /dev/null +++ b/zh_CN/getting-started/install-self-hosted/README.md @@ -0,0 +1,14 @@ +# 部署社区版 + +Dify 社区版即开源版本,你可以通过以下两种方式之一部署 Dify 社区版: + +* [Docker Compose 部署](https://docs.dify.ai/v/zh-hans/getting-started/install-self-hosted/docker-compose) +* [本地源码启动](https://docs.dify.ai/v/zh-hans/getting-started/install-self-hosted/local-source-code) + +在 GitHub 上查看 [Dify 社区版](https://github.com/langgenius/dify)。 + +### 贡献代码 + +为了确保正确审查,所有代码贡献 - 包括来自具有直接提交更改权限的贡献者 - 都必须提交 PR 请求并在合并分支之前得到核心开发人员的批准。 + +我们欢迎所有人提交 PR!如果您愿意提供帮助,可以在 [贡献指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING_CN.md) 中了解有关如何为项目做出贡献的更多信息。 diff --git a/zh_CN/getting-started/install-self-hosted/docker-compose.md b/zh_CN/getting-started/install-self-hosted/docker-compose.md new file mode 100644 index 0000000..11060ba --- /dev/null +++ b/zh_CN/getting-started/install-self-hosted/docker-compose.md @@ -0,0 +1,60 @@ +# Docker 
Compose 部署 + +### 前置条件 + +| 操作系统 | 软件 | 说明 | +| -------------------------- | -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| macOS 10.14 or later | Docker Desktop | 将 Docker 虚拟机(VM)设置为使用至少 2 个虚拟 CPU(vCPU)和 8 GB 的初始内存。否则,安装可能会失败。有关更多信息,请参阅[在 Mac 上安装 Docker Desktop](https://docs.docker.com/desktop/mac/install/)。 | +| Linux platforms |

Docker 19.03 or later
Docker Compose 1.25.1 or later

| 请参阅[安装 Docker](https://docs.docker.com/engine/install/) 和[安装 Docker Compose](https://docs.docker.com/compose/install/) 以获取更多信息。 | +| Windows with WSL 2 enabled |

Docker Desktop

| 我们建议将源代码和其他数据绑定到 Linux 容器中时,将其存储在 Linux 文件系统中,而不是 Windows 文件系统中。有关更多信息,请参阅[使用 WSL 2 后端在 Windows 上安装 Docker Desktop](https://docs.docker.com/desktop/windows/install/#wsl-2-backend)。 | + +### Clone Dify + +Clone Dify 源代码至本地 + +```Shell +git clone https://github.com/langgenius/dify.git +``` + +### Start Dify + +进入 dify 源代码的 docker 目录,执行一键启动命令: + +```Shell +cd dify/docker +docker compose up -d +``` + +> 如果您的系统安装了 Docker Compose V2 而不是 V1,请使用 `docker compose` 而不是 `docker-compose`。通过`$ docker compose version`检查这是否为情况。在[这里](https://docs.docker.com/compose/#compose-v2-and-the-new-docker-compose-command)阅读更多信息。 + +部署结果: + +```Shell +[+] Running 7/7 + ✔ Container docker-web-1 Started 1.0s + ✔ Container docker-redis-1 Started 1.1s + ✔ Container docker-weaviate-1 Started 0.9s + ✔ Container docker-db-1 Started 0.0s + ✔ Container docker-worker-1 Started 0.7s + ✔ Container docker-api-1 Started 0.8s + ✔ Container docker-nginx-1 Started +``` + +最后检查是否所有容器都正常运行: + +```Shell +docker compose ps +``` + +包括 3 个业务服务 `api / worker / web`,以及 4 个基础组件 `weaviate / db / redis / nginx`。 + +```Shell +NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS +docker-api-1 langgenius/dify-api:0.3.2 "/entrypoint.sh" api 4 seconds ago Up 2 seconds 80/tcp, 5001/tcp +docker-db-1 postgres:15-alpine "docker-entrypoint.s…" db 4 seconds ago Up 2 seconds 0.0.0.0:5432->5432/tcp +docker-nginx-1 nginx:latest "/docker-entrypoint.…" nginx 4 seconds ago Up 2 seconds 0.0.0.0:80->80/tcp +docker-redis-1 redis:6-alpine "docker-entrypoint.s…" redis 4 seconds ago Up 3 seconds 6379/tcp +docker-weaviate-1 semitechnologies/weaviate:1.18.4 "/bin/weaviate --hos…" weaviate 4 seconds ago Up 3 seconds +docker-web-1 langgenius/dify-web:0.3.2 "/entrypoint.sh" web 4 seconds ago Up 3 seconds 80/tcp, 3000/tcp +docker-worker-1 langgenius/dify-api:0.3.2 "/entrypoint.sh" worker 4 seconds ago Up 2 seconds 80/tcp, 5001/tcp +``` diff --git a/zh_CN/getting-started/install-self-hosted/environments.md 
b/zh_CN/getting-started/install-self-hosted/environments.md new file mode 100644 index 0000000..2b40da1 --- /dev/null +++ b/zh_CN/getting-started/install-self-hosted/environments.md @@ -0,0 +1,364 @@ +# 环境变量说明 + +### 公共变量 + +#### EDITION + +部署版本。 + +* `SELF_HOSTED`:自部署版本 + * 仅支持单团队/租户模式 + * 只能使用邮箱和密码方式登录 + * 无试用托管 OpenAI API-Key 功能 +* `CLOUD`:云端版本 + * 支持多团队/租户模式 + * 无法使用邮箱和密码方式登录,仅支持 GitHub、Google 授权登录。 + * 有 200 次试用托管 OpenAI API-Key 功能 + +#### CONSOLE_API_URL + +> 此变量单独作为控制台 API URL 配置,原 CONSOLE_URL 依旧可用。 + +控制台 API 后端 URL,用于拼接授权回调,传空则为同域。范例:`https://api.console.dify.ai`。 + +#### CONSOLE_WEB_URL + +控制台 web **前端** URL,用于拼接部分前端地址,以及 CORS 配置使用,传空则为同域。范例:`https://console.dify.ai` + +> 自 0.3.8 版本起,`CONSOLE_URL` 拆分为 `CONSOLE_API_URL` 和 `CONSOLE_WEB_URL`,`CONSOLE_URL` 依旧可用。 + +#### SERVICE_API_URL + +Service API Url,用于**给前端**展示 Service API Base Url,传空则为同域。范例:`https://api.dify.ai` + +> 自 0.3.8 版本起,`API_URL` 更名为 `SERVICE_API_URL`,`API_URL` 依旧可用。 + +#### APP_API_URL + +WebApp API 后端 Url,用于声明**前端** API 后端地址,传空则为同域。范例:`https://app.dify.ai` + +#### APP_WEB_URL + +WebApp Url,用于**给前端**展示 WebAPP API Base Url,传空则为同域。范例:`https://api.app.dify.ai` + +> 自 0.3.8 版本起,`APP_URL` 拆分为 `APP_API_URL` 和 `APP_WEB_URL`,`APP_URL` 依旧可用。 + +*** + +### 服务端 + +#### MODE + +启动模式,仅使用 docker 启动时可用,源码启动无效。 + +* api + + 启动 API Server。 +* worker + + 启动异步队列 worker。 + +#### DEBUG + +调试模式,默认 false,建议本地开发打开该配置,可防止 monkey patch 导致的一些问题出现。 + +#### FLASK_DEBUG + +Flask 调试模式,开启可在接口输出 trace 信息,方便调试。 + +#### SECRET_KEY + +一个用于安全地签名会话 cookie 并在数据库上加密敏感信息的密钥。初次启动需要设置改变量。可以使用`openssl rand -base64 42`生成一个强密钥。 + +#### DEPLOY_ENV + +部署环境。 + +* PRODUCTION(默认) + + 生产环境。 +* TESTING + + 测试环境,前端页面会有明显颜色标识,该环境为测试环境。 + +#### LOG_LEVEL + +日志输出等级,默认为 INFO。生产建议设置为 ERROR。 + +#### MIGRATION_ENABLED + +当设置为 true 时,会在容器启动时自动执行数据库迁移,仅使用 docker 启动时可用,源码启动无效。源码启动需要在 api 目录手动执行 `flask db upgrade`。 + +#### CHECK_UPDATE_URL + +是否开启检查版本策略,若设置为 false,则不调用 `https://updates.dify.ai` 进行版本检查。由于目前国内无法直接访问基于 CloudFlare Worker 
的版本接口,设置该变量为空,可以屏蔽该接口调用。 + +#### OPENAI_API_BASE + +用于更改 OpenAI 基础地址,默认为 https://api.openai.com/v1。 +在国内无法访问 OpenAI,替换国内镜像地址,或者本地模型提供 OpenAI 兼容 API 时,可替换使用。 + +#### 容器启动相关配置 + +仅在使用 docker 镜像或者 docker-compose 启动时有效。 + +* DIFY_BIND_ADDRESS + + API 服务绑定地址,默认:0.0.0.0,即所有地址均可访问。 +* DIFY_PORT + + API 服务绑定端口号,默认 5001。 +* SERVER_WORKER_AMOUNT + + API 服务 Server worker 数量,即 gevent worker 数量,公式:`cpu 核心数 x 2 + 1`可参考:https://docs.gunicorn.org/en/stable/design.html#how-many-workers +* SERVER_WORKER_CLASS + + 默认为 gevent,若为 windows,可以切换为 sync 或 solo。 +* GUNICORN_TIMEOUT + + 请求处理超时时间,默认 200,建议 360,以支持更长的 sse 连接时间。 +* CELERY_WORKER_CLASS + + 和 `SERVER_WORKER_CLASS` 类似,默认 gevent,若为 windows,可以切换为 sync 或 solo。 +* CELERY_WORKER_AMOUNT + + Celery worker 数量,默认为 1,按需设置。 + +#### 数据库配置 + +数据库使用 PostgreSQL,请使用 public schema。 + +* DB_USERNAME:用户名 +* DB_PASSWORD:密码 +* DB_HOST:数据库 host +* DB_PORT:数据库端口号,默认 5432 +* DB_DATABASE:数据库 database +* SQLALCHEMY_POOL_SIZE:数据库连接池大小,默认 30 个连接数,可适当增加。 +* SQLALCHEMY_POOL_RECYCLE:数据库连接池回收时间,默认 3600 秒。 +* SQLALCHEMY_ECHO:是否打印 SQL,默认 false。 + +#### Redis 配置 + +该 Redis 配置用于缓存以及对话时的 pub/sub。 + +* REDIS_HOST:Redis host +* REDIS_PORT:Redis port,默认 6379 +* REDIS_DB:Redis Database,默认为 0,请和 Session Redis、Celery Broker 分开用不同 Database。 +* REDIS_USERNAME:Redis 用户名,默认为空 +* REDIS_PASSWORD:Redis 密码,默认为空,强烈建议设置密码。 +* REDIS_USE_SSL:是否使用 SSL 协议进行连接,默认 false + +#### Session 配置 + +仅 API 服务使用,用于验证接口身份。 + +* SESSION_TYPE: + Session 组件类型 + * redis(默认) + + 选择此项,则需要设置下方 SESSION_REDIS_ 开头的环境变量。 + * sqlalchemy + + 选择此项,则使用当前数据库连接,并使用 sessions 表进行读写 session 记录。 +* SESSION_REDIS_HOST:Redis host +* SESSION_REDIS_PORT:Redis port,默认 6379 +* SESSION_REDIS_DB:Redis Database,默认为 0,请和 Redis、Celery Broker 分开用不同 Database。 +* SESSION_REDIS_USERNAME:Redis 用户名,默认为空 +* SESSION_REDIS_PASSWORD:Redis 密码,默认为空,强烈建议设置密码。 +* SESSION_REDIS_USE_SSL:是否使用 SSL 协议进行连接,默认 false + +#### Celery 配置 + +* CELERY_BROKER_URL + + 格式如下 + +
redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>
+    
+ + 范例:`redis://:difyai123456@redis:6379/1` +* BROKER_USE_SSL + + 若设置为 true,则使用 SSL 协议进行连接,默认 false + +#### CORS 配置 + +用于设置前端跨域访问策略。 + +* CONSOLE_CORS_ALLOW_ORIGINS + + 控制台 CORS 跨域策略,默认为 `*`,即所有域名均可访问。 +* WEB_API_CORS_ALLOW_ORIGINS + + WebAPP CORS 跨域策略,默认为 `*`,即所有域名均可访问。 + +详细配置可参考:[跨域/身份相关指南](https://avytux375gg.feishu.cn/wiki/HyX3wdF1YiejX3k3U2CcTcmQnjg) + +#### Cookie 策略配置 + +用于设置身份校验的 Session Cookie 浏览器策略。 + +* COOKIE_HTTPONLY + + Cookie HttpOnly 配置,默认为 true。 +* COOKIE_SAMESITE + + Cookie SameSite 配置,默认为 Lax。 +* COOKIE_SECURE + + Cookie Secure 配置,默认为 false。详细配置可参考:[跨域/身份相关指南](https://avytux375gg.feishu.cn/wiki/HyX3wdF1YiejX3k3U2CcTcmQnjg) + +#### 文件存储配置 + +用于存储数据集上传的文件、团队/租户的加密密钥等等文件。 + +* STORAGE_TYPE + + 存储设施类型 + + * local(默认) + + 本地文件存储,若选择此项则需要设置下方 `STORAGE_LOCAL_PATH` 配置。 + * s3 + + S3 对象存储,若选择此项则需要设置下方 S3_ 开头的配置。 +* STORAGE_LOCAL_PATH + + 默认为 storage,即存储在当前目录的 storage 目录下。若使用 docker 或 docker-compose 进行部署,请务必将两个容器中 `/app/api/storage` 目录挂载到同一个本机目录,否则可能会出现文件找不到的报错。 +* S3_ENDPOINT:S3 端点地址 +* S3_BUCKET_NAME:S3 桶名称 +* S3_ACCESS_KEY:S3 Access Key +* S3_SECRET_KEY:S3 Secret Key +* S3_REGION:S3 地域信息,如:us-east-1 + +#### 向量数据库配置 + +* VECTOR_STORE + + 可使用的枚举类型包括:`weaviate`、`qdrant`、`pinecone`、`milvus`(后两个暂未开放)其中 `milvus`和 `zilliz` 为同一套配置,均为 `milvus`。 +* WEAVIATE_ENDPOINT + + Weaviate 端点地址,如:`http://weaviate:8080`。 +* WEAVIATE_API_KEY + + 连接 Weaviate 使用的 api-key 凭据。 +* WEAVIATE_BATCH_SIZE + + Weaviate 批量创建索引 Object 的数量,默认 100。可参考此文档:https://weaviate.io/developers/weaviate/manage-data/import#how-to-set-batch-parameters +* WEAVIATE_GRPC_ENABLED + + 是否使用 gRPC 方式与 Weaviate 进行交互,开启后性能会大大增加,本地可能无法使用,默认为 true。 +* QDRANT_URL + + Qdrant 端点地址,如:`https://your-qdrant-cluster-url.qdrant.tech/` +* QDRANT_API_KEY + + 连接 Qdrant 使用的 api-key 凭据。 +* PINECONE_API_KEY + + 连接 Pinecone 使用的 api-key 凭据。 +* PINECONE_ENVIRONMENT + + Pinecone 所在的额环境,如:`us-east4-gcp` +* MILVUS_HOST + + Milvus host 配置。 +* MILVUS_PORT + + Milvus post 配置。 +* MILVUS_USER + + Milvus user 配置,默认为空。 +* MILVUS_PASSWORD + 
+ Milvus 密码配置,默认为空。 +* MILVUS_USE_SECURE + + Milvus 是否使用 SSL 连接,默认 false。 + +#### 数据集配置 + +* UPLOAD_FILE_SIZE_LIMIT + + 上传文件大小限制,默认 15M。 +* UPLOAD_FILE_BATCH_LIMIT + + 可批量上传文件数,默认 5。 + +#### Sentry 配置 + +用于应用监控和错误日志跟踪。 + +* SENTRY_DSN + + Sentry DSN 地址,默认为空,为空时则所有监控信息均不上报 Sentry。 +* SENTRY_TRACES_SAMPLE_RATE + + Sentry events 的上报比例,若为 0.01,则为 1%。 +* SENTRY_PROFILES_SAMPLE_RATE + + Sentry profiles 的上报比例,若为 0.01,则为 1%。 + +#### Notion 集成配置 + +Notion 集成配置,变量可通过申请 Notion integration 获取:[https://www.notion.so/my-integrations](https://www.notion.so/my-integrations) + +* NOTION_CLIENT_ID +* NOTION_CLIENT_SECRET + +#### 邮件相关配置 + +* MAIL_TYPE + + 邮件提供商类型,当前仅支持:`resend`([https://resend.com](https://resend.com)),留空则不发送邮件。 +* MAIL_DEFAULT_SEND_FROM + + 发件人邮箱名称,如:`no-reply `,非必填。 +* RESEND_API_KEY + + Resend 邮件提供商 API-Key,可前往 [API-Key](https://resend.com/api-keys) 获取。 + +#### 第三方授权设置 + +仅云端版可用。 + +* GITHUB_CLIENT_ID:GitHub 授权登录 Client ID +* GITHUB_CLIENT_SECRET:GitHub 授权登录 Client Secret +* GOOGLE_CLIENT_ID:Google 授权登录 Client ID +* GOOGLE_CLIENT_SECRET:Google 授权登录 Client Secret + +#### 平台托管模型相关配置 + +仅云端版可用,用于模型托管配置。 + +* HOSTED_OPENAI_ENABLED:启用 OpenAI 托管服务,默认 False +* HOSTED_OPENAI_API_KEY:OpenAI 托管服务的 API 密钥 +* HOSTED_OPENAI_API_BASE:OpenAI 托管服务的 API 基础地址,默认为空,即使用:`https://api.openai.com/v1` +* HOSTED_OPENAI_API_ORGANIZATION:OpenAI 托管服务的组织 ID,默认为空 +* HOSTED_OPENAI_QUOTA_LIMIT:OpenAI 托管服务的默认试用配额(单位:调用次数),默认 200 次调用 +* HOSTED_OPENAI_PAID_ENABLED:启用 OpenAI 托管付费服务,默认 False +* HOSTED_OPENAI_PAID_STRIPE_PRICE_ID:OpenAI 托管付费服务的 Stripe 价格 ID +* HOSTED_OPENAI_PAID_INCREASE_QUOTA:OpenAI 托管付费服务的支付后,增加配额数量 +* HOSTED_AZURE_OPENAI_ENABLED:启用 Azure OpenAI 托管服务,默认 False +* HOSTED_AZURE_OPENAI_API_KEY:Azure OpenAI 托管服务的 API 密钥 +* HOSTED_AZURE_OPENAI_API_BASE:Azure OpenAI 托管服务的 API 基础地址 +* HOSTED_AZURE_OPENAI_QUOTA_LIMIT:Azure OpenAI 托管服务的默认试用配额(单位:调用次数) +* HOSTED_ANTHROPIC_ENABLED:启用 Anthropic 托管服务,默认 False +* HOSTED_ANTHROPIC_API_BASE:Anthropic 托管服务的 API 基础地址,默认为空 +* 
HOSTED_ANTHROPIC_API_KEY:Anthropic 托管服务的 API 密钥 +* HOSTED_ANTHROPIC_QUOTA_LIMIT:Anthropic 托管服务的默认试用配额(单位:tokens),默认 600,000 tokens +* HOSTED_ANTHROPIC_PAID_ENABLED:启用 Anthropic 托管付费服务,默认 False +* HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID:Anthropic 托管付费服务的 Stripe 价格 ID +* HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA:Anthropic 托管付费服务的配额增加数量 +* HOSTED_ANTHROPIC_PAID_MIN_QUANTITY:Anthropic 托管付费服务的最小购买份数 +* HOSTED_ANTHROPIC_PAID_MAX_QUANTITY:Anthropic 托管付费服务的最大购买份数 +* STRIPE_API_KEY:Stripe 的 API 密钥 +* STRIPE_WEBHOOK_SECRET:Stripe 的 Webhook 密钥 + +*** + +### Web 前端 + +#### SENTRY_DSN + +Sentry DSN 地址,默认为空,为空时则所有监控信息均不上报 Sentry。 diff --git a/zh_CN/getting-started/install-self-hosted/local-source-code.md b/zh_CN/getting-started/install-self-hosted/local-source-code.md new file mode 100644 index 0000000..3c428df --- /dev/null +++ b/zh_CN/getting-started/install-self-hosted/local-source-code.md @@ -0,0 +1,221 @@ +# 本地源码启动 + +### 前置条件 + +| 操作系统 | 软件 | 说明 | +| -------------------------- | -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| macOS 10.14 or later | Docker Desktop | 将 Docker 虚拟机(VM)设置为使用至少 2 个虚拟 CPU(vCPU)和 8 GB 的初始内存。否则,安装可能会失败。有关更多信息,请参阅[在 Mac 上安装 Docker Desktop](https://docs.docker.com/desktop/mac/install/)。 | +| Linux platforms |

Docker 19.03 or later
Docker Compose 1.25.1 or later

| 请参阅[安装 Docker](https://docs.docker.com/engine/install/) 和[安装 Docker Compose](https://docs.docker.com/compose/install/) 以获取更多信息。 | +| Windows with WSL 2 enabled | Docker Desktop | 我们建议将源代码和其他数据绑定到 Linux 容器中时,将其存储在 Linux 文件系统中,而不是 Windows 文件系统中。有关更多信息,请参阅[使用 WSL 2 后端在 Windows 上安装 Docker Desktop](https://docs.docker.com/desktop/windows/install/#wsl-2-backend)。 | + +Clone Dify 代码: + +```Bash +git clone https://github.com/langgenius/dify.git +``` + +在启用业务服务之前,我们需要先部署 PostgresSQL / Redis / Weaviate(如果本地没有的话),可以通过以下命令启动: + +```Bash +cd docker +docker compose -f docker-compose.middleware.yaml up -d +``` + +*** + +### 服务端部署 + +* API 接口服务 +* Worker 异步队列消费服务 + +#### 安装基础环境 + +服务端启动需要使用到 Python 3.10.x,推荐使用 [Anaconda](https://docs.anaconda.com/free/anaconda/install/) 来快速安装 Python 环境,内部已包含 pip 包管理工具。 + +```Bash +# 创建名为 dify 的 Python 3.10 环境 +conda create --name dify python=3.10 +# 切换至 dify Python 环境 +conda activate dify +``` + +#### 启动步骤 + +1. 进入 api 目录 + + ``` + cd api + ``` +2. 复制环境变量配置文件 + + ``` + cp .env.example .env + ``` +3. 生成随机密钥,并替换 `.env` 中 `SECRET_KEY` 的值 + + ``` + openssl rand -base64 42 + sed -i 's/SECRET_KEY=.*/SECRET_KEY=/' .env + ``` +4. 安装依赖包 + + ``` + pip install -r requirements.txt + ``` +5. 执行数据库迁移 + + 将数据库结构迁移至最新版本。 + + ``` + flask db upgrade + ``` +6. 启动 API 服务 + + ``` + flask run --host 0.0.0.0 --port=5001 --debug + ``` + + 正确输出: + + ``` + * Debug mode: on + INFO:werkzeug:WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. + * Running on all addresses (0.0.0.0) + * Running on http://127.0.0.1:5001 + INFO:werkzeug:Press CTRL+C to quit + INFO:werkzeug: * Restarting with stat + WARNING:werkzeug: * Debugger is active! + INFO:werkzeug: * Debugger PIN: 695-801-919 + ``` +7. 
启动 Worker 服务 + + 用于消费异步队列任务,如数据集文件导入、更新数据集文档等异步操作。 Linux / MacOS 启动: + + ``` + celery -A app.celery worker -P gevent -c 1 -Q dataset,generation,mail --loglevel INFO + ``` + + 如果使用 Windows 系统启动,请替换为该命令: + + ``` + celery -A app.celery worker -P solo --without-gossip --without-mingle -Q dataset,generation,mail --loglevel INFO + ``` + + 正确输出: + + ``` + -------------- celery@TAKATOST.lan v5.2.7 (dawn-chorus) + --- ***** ----- + -- ******* ---- macOS-10.16-x86_64-i386-64bit 2023-07-31 12:58:08 + - *** --- * --- + - ** ---------- [config] + - ** ---------- .> app: app:0x7fb568572a10 + - ** ---------- .> transport: redis://:**@localhost:6379/1 + - ** ---------- .> results: postgresql://postgres:**@localhost:5432/dify + - *** --- * --- .> concurrency: 1 (gevent) + -- ******* ---- .> task events: OFF (enable -E to monitor tasks in this worker) + --- ***** ----- + -------------- [queues] + .> dataset exchange=dataset(direct) key=dataset + .> generation exchange=generation(direct) key=generation + .> mail exchange=mail(direct) key=mail + + [tasks] + . tasks.add_document_to_index_task.add_document_to_index_task + . tasks.clean_dataset_task.clean_dataset_task + . tasks.clean_document_task.clean_document_task + . tasks.clean_notion_document_task.clean_notion_document_task + . tasks.create_segment_to_index_task.create_segment_to_index_task + . tasks.deal_dataset_vector_index_task.deal_dataset_vector_index_task + . tasks.document_indexing_sync_task.document_indexing_sync_task + . tasks.document_indexing_task.document_indexing_task + . tasks.document_indexing_update_task.document_indexing_update_task + . tasks.enable_segment_to_index_task.enable_segment_to_index_task + . tasks.generate_conversation_summary_task.generate_conversation_summary_task + . tasks.mail_invite_member_task.send_invite_member_mail_task + . tasks.remove_document_from_index_task.remove_document_from_index_task + . tasks.remove_segment_from_index_task.remove_segment_from_index_task + . 
tasks.update_segment_index_task.update_segment_index_task + . tasks.update_segment_keyword_index_task.update_segment_keyword_index_task + + [2023-07-31 12:58:08,831: INFO/MainProcess] Connected to redis://:**@localhost:6379/1 + [2023-07-31 12:58:08,840: INFO/MainProcess] mingle: searching for neighbors + [2023-07-31 12:58:09,873: INFO/MainProcess] mingle: all alone + [2023-07-31 12:58:09,886: INFO/MainProcess] pidbox: Connected to redis://:**@localhost:6379/1. + [2023-07-31 12:58:09,890: INFO/MainProcess] celery@TAKATOST.lan ready. + ``` + +*** + +### 前端页面部署 + +Web 前端客户端页面服务 + +#### 安装基础环境 + +Web 前端服务启动需要用到 [Node.js v18.x (LTS)](http://nodejs.org) 、[NPM 版本 8.x.x ](https://www.npmjs.com/)或 [Yarn](https://yarnpkg.com/)。 + +* 安装 NodeJS + NPM + +进入 https://nodejs.org/en/download,选择对应操作系统的 v18.x 以上的安装包下载并安装,建议 stable 版本,已自带 NPM。 + +#### 启动步骤 + +1. 进入 web 目录 + + ``` + cd web + ``` +2. 安装依赖包 + + ``` + npm install + ``` +3. 配置环境变量。在当前目录下创建文件 `.env.local`,并复制`.env.example`中的内容。根据需求修改这些环境变量的值: + + ``` + # For production release, change this to PRODUCTION + NEXT_PUBLIC_DEPLOY_ENV=DEVELOPMENT + # The deployment edition, SELF_HOSTED or CLOUD + NEXT_PUBLIC_EDITION=SELF_HOSTED + # The base URL of console application, refers to the Console base URL of WEB service if console domain is + # different from api or web app domain. + # example: http://cloud.dify.ai/console/api + NEXT_PUBLIC_API_PREFIX=http://localhost:5001/console/api + # The URL for Web APP, refers to the Web App base URL of WEB service if web app domain is different from + # console or api domain. + # example: http://udify.app/api + NEXT_PUBLIC_PUBLIC_API_PREFIX=http://localhost:5001/api + + # SENTRY + NEXT_PUBLIC_SENTRY_DSN= + NEXT_PUBLIC_SENTRY_ORG= + NEXT_PUBLIC_SENTRY_PROJECT= + ``` +4. 构建代码 + + ``` + npm run build + ``` +5. 
启动 web 服务 + + ``` + npm run start + # or + yarn start + # or + pnpm start + ``` + +正常启动后,终端会输出如下信息: + +``` +ready - started server on 0.0.0.0:3000, url: http://localhost:3000 +warn - You have enabled experimental feature (appDir) in next.config.js. +warn - Experimental features are not covered by semver, and may cause unexpected or broken application behavior. Use at your own risk. +info - Thank you for testing `appDir` please leave your feedback at https://nextjs.link/app-feedback +``` + +### 访问 Dify + +最后,访问 http://127.0.0.1:3000 即可使用本地部署的 Dify。 diff --git a/zh_CN/getting-started/install-self-hosted/start-the-frontend-docker-container.md b/zh_CN/getting-started/install-self-hosted/start-the-frontend-docker-container.md new file mode 100644 index 0000000..6f0f0d5 --- /dev/null +++ b/zh_CN/getting-started/install-self-hosted/start-the-frontend-docker-container.md @@ -0,0 +1,24 @@ +# 单独启动前端 Docker 容器 + +当单独开发后端时,可能只需要源码启动后端服务,而不需要本地构建前端代码并启动,因此可以直接通过拉取 docker 镜像并启动容器的方式来启动前端服务,以下为具体步骤: + +#### 直接使用 DockerHub 镜像 + +```Bash +docker run -it -p 3000:3000 -e EDITION=SELF_HOSTED -e CONSOLE_URL=http://127.0.0.1:3000 -e APP_URL=http://127.0.0.1:3000 langgenius/dify-web:latest +``` + +#### 源码构建 Docker 镜像 + +1. 构建前端镜像 + + ``` + cd web && docker build . -t dify-web + ``` +2. 启动前端镜像 + + ``` + docker run -it -p 3000:3000 -e EDITION=SELF_HOSTED -e CONSOLE_URL=http://127.0.0.1:3000 -e APP_URL=http://127.0.0.1:3000 dify-web + ``` +3. 当控制台域名和 Web APP 域名不一致时,可单独设置 `CONSOLE_URL` 和 `APP_URL` +4. 本地访问 [http://127.0.0.1:3000](http://127.0.0.1:3000) diff --git a/zh_CN/getting-started/what-is-llmops.md b/zh_CN/getting-started/what-is-llmops.md new file mode 100644 index 0000000..dfde4e7 --- /dev/null +++ b/zh_CN/getting-started/what-is-llmops.md @@ -0,0 +1,27 @@ +# 什么是 LLMOps? + +**LLMOps(Large Language Model Operations)是一个涵盖了大型语言模型(如GPT系列)开发、部署、维护和优化的一整套实践和流程。LLMOps 的目标是确保高效、可扩展和安全地使用这些强大的 AI 模型来构建和运行实际应用程序。它涉及到模型训练、部署、监控、更新、安全性和合规性等方面。** + +下表说明了使用 Dify 前后开发 AI 应用的各环节差异: + +
步骤未使用 LLMOps 平台使用 Dify LLMOps 平台时间差异
开发应用前&后端集成和封装 LLM 能力,花费较多时间开发前端应用直接使用 Dify 的后端服务,可基于 WebApp 脚手架开发 -80%
Prompt Engineering仅能通过调用 API 或 Playground 进行结合用户输入数据所见即所得完成调试-25%
数据准备与嵌入编写代码实现长文本数据处理、嵌入在平台上传文本或绑定数据源即可-80%
应用日志与分析编写代码记录日志,访问数据库查看平台提供实时日志与分析-70%
数据分析与微调技术人员进行数据管理和创建微调队列非技术人员可协同,可视化模型调整-60%
AI 插件开发与集成编写代码创建、集成 AI 插件平台提供可视化工具创建、集成插件能力-50%
+ +在使用 LLMOps 平台如 Dify 之前,基于 LLM 开发应用的过程可能会非常繁琐和耗时。开发者需要自行处理各个阶段的任务,这可能导致效率低下、难以扩展和安全性问题。以下是使用 LLMOps 平台前的开发过程: + +1. 数据准备:手动收集和预处理数据,可能涉及到复杂的数据清洗和标注工作,需要编写较多代码。 +2. Prompt Engineering:开发者只能通过调用 API 或 Playground 进行 Prompt 编写和调试,缺乏实时反馈和可视化调试。 +3. 嵌入和上下文管理:手动处理长上下文的嵌入和存储,难以优化和扩展,需要不少编程工作,熟悉模型嵌入和向量数据库等技术。 +4. 应用监控与维护:手动收集和分析性能数据,可能无法实时发现和处理问题,甚至可能没有日志记录。 +5. 模型微调:自行处理微调数据准备和训练过程,可能导致效率低下,需要编写更多代码。 +6. 系统和运营:需要技术人员参与或花费成本开发管理后台,增加开发和维护成本,缺乏多人协同和对非技术人员的友好支持。 + +引入 Dify 这样的 LLMOps 平台后,基于 LLM 开发应用的过程将变得更加高效、可扩展和安全。以下是使用 Dify 进行 LLM 应用开发的优势: + +1. 数据准备:平台提供数据收集和预处理工具,简化了数据清洗和标注的工作,最小化甚至消除了编码工作。 +2. Prompt Engineering:所见即所得的 Prompt 编辑和调试,可根据用户输入的数据进行实时优化和调整。 +3. 嵌入和上下文管理:自动处理长上下文的嵌入、存储和管理,提高效率和扩展性,无需编写大量代码。 +4. 应用监控与维护:实时监控性能数据,快速发现和处理问题,确保应用程序的稳定运行,提供完整的日志记录。 +5. 模型微调:平台提供一键微调功能,基于过去已标注的真实使用数据进行训练,提高模型性能,减少编程工作。 +6. 系统和运营:易用的界面,非技术人员也可参与,支持多人协同,降低开发和维护成本。与传统开发方式相比,Dify 提供了更加透明和易于监控的应用管理,让团队成员更好地了解应用的运行情况。 + +另外,Dify 将提供 AI 插件开发和集成的功能,使得开发者可以轻松地为各种应用创建和部署基于 LLM 的插件,进一步提升了开发效率和应用的价值。 diff --git a/zh_CN/use-cases/build-an-notion-ai-assistant.md b/zh_CN/use-cases/build-an-notion-ai-assistant.md new file mode 100644 index 0000000..1ef2b70 --- /dev/null +++ b/zh_CN/use-cases/build-an-notion-ai-assistant.md @@ -0,0 +1,164 @@ +# 构建一个 Notion AI 助手 + +_作者:阿乔. Dify 用户_ + +### 概述 + +Notion 是一个强大的知识管理工具。它的灵活性和可扩展性使其成为一个出色的个人知识库和共享工作空间。许多人使用它来存储他们的知识,并与他人协作,促进思想交流和新知识的创造。 + +然而,这些知识仍然是静态的,因为用户必须搜索他们需要的信息并阅读其中的内容才能找到他们寻求的答案。这个过程既不特别高效,也不智能。 你是否曾经梦想过拥有一个基于你的 Notion 库的 AI 助手?这个助手不仅可以帮助你审查知识库,还可以像一位经验丰富的管家一样参与交流,甚至回答其他人的问题,就好像你是自己的个人 Notion 库的主人一样。 + +### 如何实现自己的 Notion AI 助手? + +现在,你可以通过 Dify 来实现这个梦想。Dify 是一个开源的 LLMOps(大型语言模型运维)平台。 ChatGPT 和 Claude 等大型语言模型已经利用其强大的能力改变了世界。它们的强大学习能力主要归功于丰富的训练数据。幸运的是,它们已经发展到足够智能的程度,可以从你提供的内容中进行学习,从而使从个人 Notion 库中生成创意成为现实。 在没有 Dify 的情况下,你可能需要熟悉 langchain,这是一个简化组装这些要素过程的抽象概念。 + +### 如何使用Dify创建自己的AI助手? + +训练Notion AI助手的过程非常简单。您只需要按照如下步骤操作: + +1.登录 Dify。 + +2.创建一个数据集。 + +3.将 Notion 和数据集连接起来。 + +4.开始训练。 + +5.创建自己的AI应用程序。 + +#### 1. 
登录 Dify[​](https://wsyfin.com/notion-dify#1-login-to-dify) + +点击这里登录到 Dify。你可以使用你的 GitHub 或 Google 账户方便地登录。 + +> 如果你使用 GitHub 账户登录,不妨给这个[项目](https://github.com/langgenius/dify)点个星星吧?这真的对我们有很大的支持! + +![login-1](https://pan.wsyfin.com/f/ERGcp/login-1.png) + +#### 2.创建新的数据集 + +点击顶部侧边栏的 "Datasets" 按钮,然后点击 "Create Dataset" 按钮。 + +![login-2](https://pan.wsyfin.com/f/G6ziA/login-2.png) + +#### 3. 与 Notion 和您的数据集进行连接 + +选择 "Sync from Notion",然后点击 "Connect" 按钮。 + +![connect-with-notion-1](https://pan.wsyfin.com/f/J6WsK/connect-with-notion-1.png) + +然后,您将被重定向到 Notion 登录页面。使用您的 Notion 帐户登录。 + +![connect-with-notion-2](https://pan.wsyfin.com/f/KrEi4/connect-with-notion-2.png) + +检查 Dify 所需的权限,然后单击“选择页面”按钮。 + +![connect-with-notion-3](https://pan.wsyfin.com/f/L91iQ/connect-with-notion-3.png) + +选择你要和 Dify 同步的页面,然后点击“允许访问”按钮。 + +![connect-with-notion-4](https://pan.wsyfin.com/f/M8Xtz/connect-with-notion-4.png) + +#### 4. 开始训练 + +指定需要让 AI 学习的页面,使其能够理解 Notion 中这个部分的内容。然后点击 "下一步" 按钮。 + +![train-1](https://pan.wsyfin.com/f/Nkjuj/train-1.png) + +我们建议选择 "自动" 和 "高质量" 的选项来训练你的 AI 助手。然后点击 "保存并处理" 按钮。 + +![train-2](https://pan.wsyfin.com/f/OYoCv/train-2.png) + +等待几秒钟,embedding 处理进程完成。 + +![train-3](https://pan.wsyfin.com/f/PN9F3/train-3.png) + +#### 5. 创建你自己的 AI 应用程序[​](https://wsyfin.com/notion-dify#5-create-your-own-ai-application) + +你需要创建一个AI应用,然后连接刚刚创建的数据集。返回到仪表板,然后点击“创建新应用”按钮。建议直接使用聊天应用。 + +![create-app-1](https://pan.wsyfin.com/f/QWRHo/create-app-1.png) + +选择“Prompt Eng.”并在“context”中添加你的 Notion 数据集。 + +![create-app-2](https://pan.wsyfin.com/f/R6DT5/create-app-2.png) + +我建议在你的 AI 应用程序中添加一个「预设提示」。就像咒语对于哈利·波特来说是必不可少的一样,某些工具或功能可以极大地增强 AI 应用程序的能力。 + +例如,如果你的 Notion 笔记主要关注软件开发中的问题解决,可以在其中一个提示中写道: + +> 我希望你能在我的 Notion 工作区中扮演一个 IT 专家的角色,利用你对计算机科学、网络基础设施、Notion 笔记和 IT 安全的知识来解决问题。 + +
+ +建议初始时启用 AI 主动提供用户一个起始句子,给出可以询问的线索。此外,激活「语音转文字」功能可以让用户通过语音与你的 AI 助手进行互动。 + +
+ +现在您可以在“概览”中单击公共 URL 聊天与您自己的 AI 助手! + +
+ +### 通过API集成到您的项目中​ + +通过 Dify 打造的每个 AI 应用程序都可以通过其 API 进行访问。这种方法允许开发人员直接利用前端应用程序中强大的大型语言模型(LLM)的特性,提供真正的“后端即服务”(BaaS)体验。 + +通过无缝的 API 集成,您可以方便地调用您的 Notion AI 应用程序,无需复杂的配置。 + +在概览页面上点击「API 参考」按钮。您可以将其作为您应用程序的 API 文档参考。 + +![using-api-1](https://pan.wsyfin.com/f/wp0Cy/using-api-1.png) + +#### 1. 生成 API 密钥 + +为了安全起见,建议生成 API 密钥以访问您的 AI 应用。 + +![using-api-2](https://pan.wsyfin.com/f/xk2Fx/using-api-2.png) + +#### 2.检索会话ID + +与 AI 应用程序聊天后,您可以从“Logs & Ann.”页面检索会话 ID。 + +![using-api-3](https://pan.wsyfin.com/f/yPXHL/using-api-3.png) + +#### 3. 调用API + +您可以在API文档上运行示例请求代码来调用终端中的AI应用程序。 + +记住替换代码中的SECRET KEY和conversation_id。 + +您可以在第一次输入空的conversation_id,在收到包含conversation_id的响应后将其替换。 + +``` +curl --location --request POST 'https://api.dify.ai/v1/chat-messages' \ +--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "inputs": {}, + "query": "eh", + "response_mode": "streaming", + "conversation_id": "", + "user": "abc-123" +}' +``` + +在终端中发送请求,您将获得成功的响应。 + +![using-api-4](https://pan.wsyfin.com/f/zpnI4/using-api-4.png) + +如果您想继续此聊天,请将请求代码的`conversation_id`替换为您从响应中获得的`conversation_id`。 + +你可以在`"Logs & Ann "`页面查看所有的对话记录。 + +![using-api-5](https://pan.wsyfin.com/f/ADQSE/using-api-5.png) + +### 周期性地与 Notion 同步 + +如果你的 Notion 页面更新了,你可以定期与 Dify 同步,让你的人工智能助手保持最新状态。你的人工智能助手将从新内容中学习并回答新问题。 + +![create-app-5](https://pan.wsyfin.com/f/XDBfO/create-app-5.png) + +### 总结 + +在本教程中,我们不仅学会了如何将您的 Notion 数据导入到 Dify 中,还学会了如何使用 API 将其与您的项目集成。 + +Dify 是一个用户友好的 LLMOps 平台,旨在赋予更多人创建可持续的 AI 原生应用程序的能力。通过为各种应用类型设计的可视化编排,Dify 提供了可供使用的应用程序,可以帮助您利用数据打造独特的 AI 助手。如果您有任何疑问,请随时与我们联系。 diff --git a/zh_CN/use-cases/create-a-midjoureny-prompt-word-robot-with-zero-code.md b/zh_CN/use-cases/create-a-midjoureny-prompt-word-robot-with-zero-code.md new file mode 100644 index 0000000..4c15182 --- /dev/null +++ b/zh_CN/use-cases/create-a-midjoureny-prompt-word-robot-with-zero-code.md @@ -0,0 +1,58 @@ +# 教你十几分钟不用代码创建 Midjoureny 提示词机器人 + +_作者:歸藏的 AI 工具箱_ + 
+试用了一下前几天我周刊里推荐的自然语言编程工具 Dify,它是 @goocarlos 开发的能够让一个不会代码的人只通过编写提示词生成一个 Web 应用,甚至连 API 也生成好了,你可以接入以后部署到你希望部署的平台上去。 + + +下面这个应用就是我花 20 分钟写的,效果非常好,如果没有 Dify 我可能需要搞好久才能搞定。具体的功能就是根据输入的简短的主题生成 Midjourney 的提示词,也能帮助你快速填写常见的 Midjourney 命令。下面我会写一下这个应用的创建过程帮大家熟悉一下这个平台。 + +Dify 可以选择两种应用一种是对话型应用类似于 ChatGPT 那种,另一种是文本生成类应用没有多轮对话的机制点击按钮直接生成文本内容。我们要创建的 Midjoureny 提示词机器人,所以我们选择文本生成应用就行。 + +你可以在这里访问 Dify:https://dify.ai/ + +
+ +我们输入名称创建完成之后看到的页面就是我们的看板页面里面有一些数据监控和应用设置。我们先点击左侧的提示词编排,这里就是主要的工作页面了。 + +
+ +这个页面左侧是提示词设置和一些其他功能,右侧可以实时预览和使用你创建的内容。前缀提示词就是用户每次输入内容后触发的提示词了,可以理解为每次 GPT 都会按照你的前缀提示词内容去处理用户输入的信息。 + +
+ +可以看一下我的前缀提示词结构,主要有两部分前面是中文写的让 GPT 按照下面英文的结构输出照片的描述。英文的结构就是生成提示词的模板了,主要意思是【主题彩色照片,错综复杂的图案,鲜明的对比,环境描述,相机型号,与输入内容相关的镜头焦距描述,与输入内容相关的构图描述,四位摄影师的名字】这个就是提示词的主要内容了。理论上你现在保存到右边预览区域输入你想生成的主题就能生成对应提示词了。 + +
+ +那可能有人注意到了后面的 \{{proportion\}} 和 \{{version\}} 是什么呢,可以看到我右边需要用户选择图像比例和模型版本这两个变量就是用来传递用户选择的信息的。我们看一下如何设置。 + +
+ +我们的功能是把用户选择的信息填到提示词的最后方便用户直接复制不用重新填写和记忆这些命令,这里就要用到下面的变量功能。 + +变量的作用就是把用户表单填写或者选择的内容动态的带入到提示词里面。比如我这里创建了两个变量一个代表图像比例一个代表模型版本。点击添加按钮创建变量。 + +
+ +创建之后首先需要填写前面的变量 Key 和字段名称,变量 Key 需要英文。后面的可选开启之后这个字段就会是选填的。之后我们需要点击操作栏的设置来设置变量内容。 + +
+ +变量包括两种内容一种是文本变量一种是下拉选项,文本的就是用户需要手动输入,下拉的话就是选择了。这里我们主要是不想用户手打命令,所以我们选择下拉选项。增加需要的选项就行。 + +
+ +之后就是使用变量了,我们需要把变量 Key 用两个 {} 包起来填写到前缀提示词里。这里由于我们不想让 GPT 改用户选择的内容,我加了一句提示词“如实输出变量内容”。 + +
+ +加了上面那句话 GPT 还是有一定几率改掉我们的变量内容怎么办,我们可以在右侧模型选择这里把多样性调低,这样创造性输出就会降低,他就不容易改写我们的变量内容了。其他的几个参数的意思可看小叹号了解。 + +
+ +好了到这里我们的应用就创建完成了,测试输出没问题以后点击右上角的发布按钮,你的应用就发布了。你你和用户可以通过公开访问 URL 的地址访问你的应用。可以在设置里设置应用名称以及简介、图标之类的内容。 + +
+ +这就是利用 Dify 创建一个简单 AI 应用的流程,你还可以通过生成的 API 把你的应用部署在其他平台或者更改它的 UI。同时 Dify 还支持上传你自己的数据,比如你可以建一个客服机器人帮助你回答自己产品的相关问题。教程到这里就结束了,感谢 @goocarlos 创建了这么好的产品。 diff --git a/zh_CN/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md b/zh_CN/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md new file mode 100644 index 0000000..9e7f05e --- /dev/null +++ b/zh_CN/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md @@ -0,0 +1,68 @@ +# 如何在几分钟内创建一个带有业务数据的官网 AI 智能客服 + +可能 AI 智能客服是每个业务网站的标配,在大型语言模型能力广泛被应用后,智能客服的实现变得更加轻易,可定制化的程度也更高。以下内容,将指导你如何在几分钟时间内,使用 Dify 创建你网站的 AI 智能客服。**Dify 产品支持将对话型应用嵌入到网页,你**只需要**花费几分钟**就可以将对话型应用**免费**嵌入到你的官网上,**定制你的 AI 智能客服。即使非技术人员也能搞定!** + +#### 首先,你需要理解 Dify.AI 是什么? + +Dify 是一个开源且非常简单易用的 LLMOps 平台,让你能够可视化快速创建并运营 AI 应用的工具平台。Dify 提供了可视化的 Prompt 编排、运营、数据集管理等功能。你甚至无需具备 AI 相关的技术研究和晦涩概念的理解。Dify 对接了各个出色的大型语言模型供应商,如 OpenAI、Azure OpenAI、Antropic 等,已提供 GPT 系列、Claude 系列模型,未来也将接入优秀的开源模型。这一切都是可以在设置中切换使用。这意味着,你在创建调试应用时,可以对比不同模型的效果,以确定使用最适合你的模型。**基于 Dify,你不仅可以很轻易地开发一个 AI 智能客服,还可以创造符合你使用习惯和需求的文本写作助手、虚拟招聘 HR 专家、会议总结助手、翻译助手等各种文本生成型应用,为你的工作提效。** + +
+ +**前提条件** + +**注册或部署 Dify.AI** + +Dify 是一个开源产品,你可以在 GitHub (https://github.com/langgenius/dify) 上找到它并部署在你本地或公司内网。同时它提供了云端 SaaS 版本,访问 https://dify.ai/zh 注册即可使用。 + +**申请 OpenAI 等模型厂商的 API key** + +AI 模型的消息调用需要消耗 token,Dify 提供了 OpenAI GPT 系列(200 次) 和 Antropic Claude(1000 次) 模型的消息免费调用使用额度,在你消耗完毕前,需要通过模型厂商的官方渠道申请你自己的 API key。在 Dify 的【设置】--【模型提供商】处可填入 key。 + +#### 上传你的产品文档或知识库 + +如果你希望能基于公司现有的知识库和产品文档构建人工智能客服,来与用户交流,那么你需要尽可能将你产品有关的文档上传到 Dify 的数据集中。Dify 帮助你完成数据的**分段处理和清洗**。Dify数据集支持高质量和经济两种索引模式,我们推荐使用高质量模式,会消耗 token 但能提供更高的准确性。操作步骤:在 【数据集】页面,新建一个数据集,上传你的业务数据(支持批量上传多个文本),选择清洗方式,【保存并处理】,只需几秒钟即可完成处理。 + +
+ +**创建一个 AI 应用,给 AI 指令** + +在【构建应用】页面创建一个对话型应用。然后开始设置 AI 指令和它在前端和用户交互的体验: + +1. **给 AI 指令:**点击左侧【提示词编排】编辑你的 Prompt ,让它扮演客服的角色与用户交流,你可以指定它和用户交流的语气、风格、限定它回答问题的范围; +2. **让 AI 拥有你的业务知识:**在【上下文】中添加你刚才上传的目标数据集; +3. **设置一个【对话开场白】:**点击 【添加功能】打开功能开关。目的是为 AI 应用添加一个开场白,在用户打开客服窗口时,它会先和用户打招呼,增加亲和感。 +4. **设置【下一步问题建议】:**在【添加功能】开启此功能。目的是为了让用户在提完一个问题后,给用户进行下一步提问的方向提示。 +5. **选择一个合适的模型并调整参数:**页面右上角可以选择不同的模型。不同模型的表现和消耗的 token 价格都不一样。这个例子中,我们使用 GPT3.5 模型。 + +在这个 case 中,我们给 AI 指定了扮演的角色: + +> 指令:你将扮演 Dify 的 AI 智能客服,你是 Dify 的第一个 AI 员工,名字叫 Bob。专门解答关于 Dify 产品、团队或 LLMOps 相关的用户问题。请注意,当用户提出的问题不在你的上下文内容范围内时,请回答不知道。请以友好的语气和用户交流,可以适当加入一些 emoji 表情增进与用户之间的互动。 + +> 开场白:你好,我是 Bob☀️, Dify的第一位AI成员。您可以与我讨论任何与Dify产品、团队甚至LLMOps相关的问题。 + +
+ +**调试 AI 智能客服的表现并发布** + +完成以上的设置后,你可以在当前的页面右侧给它发送信息调试它的表现是否符合预期。然后点击【发布】。这时候你就已经拥有了一个 AI 智能客服。 + +
+ +**将 AI 客服应用嵌入到你的前端页面** + +这一步,是将准备好的 AI 智能客服嵌入到你的官网页面。依次点击【概览】->【嵌入】,选择 **script 标签方式,**将 script 代码复制到你网站 `` 或 `` 标签中。如你是非技术人员,可让负责官网的开发帮忙粘贴并更新页面。 + +
+ +1. 将复制的代码粘贴到你官网的目标位置: + +
+ +1. 更新你的官网,即可以得到一个拥有你业务数据的官网 AI智能客服。试一试效果: + +
+ +以上通过 Dify 官网 AI 客服 Bob 的例子演示了如何将 Dify 应用嵌入到官网的具体操作步骤。当然,你还可以通过 Dify 提供的更多特性来增加 AI 客服的表现,例如增加一些变量设置,让用户在互动前填入必要的判断信息,如名字、使用的具体产品等等。 +欢迎你一起来探索,定制企业的 AI 智能客服。 + +
diff --git a/zh_CN/use-cases/integrate-with-wecom-using-dify.md b/zh_CN/use-cases/integrate-with-wecom-using-dify.md new file mode 100644 index 0000000..ceb767d --- /dev/null +++ b/zh_CN/use-cases/integrate-with-wecom-using-dify.md @@ -0,0 +1,347 @@ +# 零代码,使用 Dify 两分钟接入企业微信 + +_作者:小聚乙烯人_ + +Dify 允许创建 AI 应用,并提供二次开发的能力。这里我将演示创建一个法律问答助手的 AI 应用,称作“知法”。在本篇教程中,我将指导你为“知法”接入企业微信。 + + + +## 前置准备 + +* 企业微信的管理员权限 +* 一个 [Dify](https://dify.ai/) 的帐号 +* 一个 [Laf](https://laf.run/) 云的帐号 +* (可选)一个 OpenAI 的 API Key。如果没有,可以使用 Dify 免费提供的 200 次调用机会用于测试。 +* (可选)在电脑上新建一个 env.txt 的文件,将下面内容复制到 env.txt 中。在接下来的教程中,我们会一步步把相关的信息填入这个文件。需要保存信息的步骤会高亮显示。 + +```JavaScript +WXWORK_TOKEN="" +WXWORK_AESKEY="" +WXWORK_CORPID="" +WXWORK_AGENTID="" +WXWORK_CORPSECRET="" +DIFY_APPTOKEN="" +``` + +## 在 Dify 上制作应用 + +这一章节将会介绍如何创建一个法律知识的数据集,并将数据集和应用关联起来。 + +### 搭建法律知识数据集 + +> 随时查看文档中关于搭建数据集的更多操作:[【数据集管理】](https://docs.dify.ai/v/zh-hans/advanced/datasets) + +为了让“知法”了解到更多的上下文,我们需要创建一个法律知识的数据库。 + + + +* **导入文档:**从电脑上导入法律知识的 PDF 文档。 + +
+ +* **文本分段和清洗**:上传的文本需要经过二次加工,才能被大语言模型理解。这里我们不需要关注具体的实现逻辑,直接选择自动分段即可,然后点击“保存并处理”。 + +
+ +* **文本嵌入:**大约 30s 时间,数据集就创建成功了。你可以随时回来向数据库里添加更多文件。 + +
+ +
+ +### 搭建的应用 + +> 随时查看文档中关于创建应用的更多操作 [【创建应用】](https://docs.dify.ai/v/zh-hans/application/creating-an-application) + +* **创建应用:**根据图中的指示,创建一个对话型应用,并命名为“知法”。 + +
+ +* **关联数据集:**在“提示词编排”页,在“上下文”模块中添加选择刚刚创建的数据集。 + +
+ +* **发布模型:**完成关联数据集后,点击页面右上角的“发布”,使模型生效。 + +
+ +* **获取 API 访问密钥。**在“访问 API”页面,创建一个 API 密钥并复制保存为`DIFY_APPTOKEN`。请注意不要把密钥泄漏给任何人,以免造成财产损失。 + +
+ +## 创建企业微信应用 + +* **记录企业信息:**进入企业微信管理后台-我的企业,记录这里的企业 ID 为 `WXWORK_CORPID` + +
+ +* **创建企业微信应用:**进入应用管理页面,点击【创建应用】进入创建页面,填写应用信息后点击【创建应用】。如果已经有现成的应用,可以跳过此步骤。 + +
+ +
+ +* **记录企业微信应用信息:**在应用管理页面点击刚刚创建好的应用,进入应用详情页面。记录这里的 AgentId 和 Secret(需要点击获取按钮,在企业微信聊天窗口里面获取),分别为WXWORK_AGENTID和WXWORK_CORPSECRET。 + +
+ +* **企业微信应用接收信息:**在应用详情页面,接收消息处点击【设置 API 接收】。 + +
+ +在 API 接收消息页面,点一下两个【随机获取】按钮,它会自动生成一个 Token 和 EncodingAESKey,我们分别记为 WXWORK_TOKEN 和 WXWORK_AESKEY。注意,不要关掉这个页面,Laf 侧配置完毕后我们再来填写 URL。 + + +
+ +## 在 Laf 云上创建云函数 + +* **新建 Laf 云应用:**进入 Laf 后,点击新建,创建一个云应用。这里选择免费的计划即可。 + +
+ +* **添加依赖:**企业微信应用需要添加`@wecom/crypto`, `xml2js` 两个依赖。添加好后,你的依赖列表应该像下面一样。 + +
+ +* **添加环境变量:**从第二行开始,将上面步骤中收集到的所有内容全部粘贴到这里,点击更新。 + +
+ +* **创建云函数:**点击创建一个云函数,注意“请求方法”中勾选上`POST`, `GET`,点击确定。 + +
+ +在创建好云函数中,删除默认的代码,并将文末**“附录”**中的代码全部粘贴到这里。 + +
+ +* **发布云函数:**点击发布后,云函数就生效了。 + +
+ +现在把 URL 粘贴到企业微信后台【设置 API 接收】的页面中刚刚留白的地方,然后点击保存。 + +
+ +* **配置 IP 白名单:**在企业微信中找到刚刚创建应用,发送一句消息。不出意外收不到任何消息。这是因为企业微信默认屏蔽了 Laf 云的 IP。 + +点击日志,应当能看到这样一条报错 '`not allow to access from your ip`' + +
+ +点击查看这条日志详情,记录日志中给出的 Laf 云 IP。 + +
+ +回到企业微信的管理后台,点击刚刚创建的应用,为应用配置可行 IP。 + +
+ +在这里把刚刚的日志中记录的 IP 填入即可。 + +
+ +## 验证效果 + +1. **测试聊天:**在企业微信中找到刚刚创建应用,发送一句消息。现在应当能收到推送的消息了。 + +
+ +## 引用 + +这篇深度参考以下文章,感谢原作者的辛勤付出。[https://forum.laf.run/d/556/3](https://forum.laf.run/d/556/3) + +## 附录 + +### 企业微信应用代码 - (伪流式响应) + +```JavaScript +import cloud from '@lafjs/cloud' +import { decrypt, getSignature } from '@wecom/crypto' +import xml2js from 'xml2js' + +function genConversationKey(userName) { + return `${process.env.WXWORK_AGENTID}:${userName}` +} + +function genWxAppAccessTokenKey() { + return `${process.env.WXWORK_AGENTID}:access-token` +} + +async function getToken() { + console.log('[getToken] called') + + const cache = cloud.shared.get(genWxAppAccessTokenKey()) + if (cache && cache.expires >= Date.now()) return cache.token + + const res = await cloud.fetch({ + url: 'https://qyapi.weixin.qq.com/cgi-bin/gettoken', + method: 'GET', + params: { + corpid: process.env.WXWORK_CORPID, + corpsecret: process.env.WXWORK_CORPSECRET, + } + }) + + const token = res.data.access_token + cloud.shared.set(genWxAppAccessTokenKey(), { token, expires: Date.now() + res.data.expires_in * 1000 }) + return token +} + +async function sendWxMessage(message, user) { + console.log('[sendWxMessage] called', user, message) + + const res = await cloud.fetch({ + url: 'https://qyapi.weixin.qq.com/cgi-bin/message/send', + method: 'POST', + params: { + access_token: await getToken() + }, + data: { + "touser": user, + "msgtype": "text", + "agentid": process.env.WXWORK_AGENTID, + "text": { + "content": message + }, + "safe": 0, + "enable_id_trans": 0, + "enable_duplicate_check": 0, + "duplicate_check_interval": 1800 + }, + }) + console.log('[sendWxMessage] received', res.data) +} + +async function sendDifyMessage(message, userName, onMessage) { + console.log('[sendDifyMessage] called', message, userName) + + const conversationId = cloud.shared.get(genConversationKey(userName)) || null + let newConversationId = '' + let responseText = '' + + try { + const response = await cloud.fetch({ + url: 'https://api.dify.ai/v1/chat-messages', + method: 'POST', + headers: { + 'Authorization': 
`Bearer ${process.env.DIFY_APPTOKEN}` + }, + data: { + inputs: {}, + response_mode: "streaming", + query: message, + user: userName, + conversation_id: conversationId + }, + responseType: "stream" + }) + + let firstHalfMessage = '' + response.data.on('data', (data) => { + let message = data.toString() + try { + if (firstHalfMessage) { + message += firstHalfMessage + firstHalfMessage = '' + } + + // 检查是不是sse协议 + if (!message.startsWith('data: ')) return + + const parsedChunk: Record = JSON.parse(message.substring(6)) + + if (!newConversationId) { + newConversationId = parsedChunk.conversation_id + cloud.shared.set(genConversationKey(userName), newConversationId) + } + const { answer } = parsedChunk + responseText += answer + + // 伪流式响应 + if (answer.endsWith('\n\n') || (responseText.length > 120 && /[?。;!]$/.test(responseText))) { + onMessage(responseText.replace('\n\n', '')) + console.log('[sendDifyMessage] received', responseText, newConversationId) + responseText = '' + } + } catch (e) { + firstHalfMessage = message + console.error('[sendDifyMessage] error', message) + } + + }) + + // stream结束时把剩下的消息全部发出去 + response.data.on('end', () => { + onMessage(responseText.replace('\n\n', '')) + }) + } catch (e) { + console.error("[sendDifyMessage] error", e) + } +} + +async function asyncSendMessage(xml) { + console.log('[asyncSendMessage] called', xml) + + if (xml.MsgType[0] !== 'text') return + + const message = xml.Content[0] + const userName = xml.FromUserName[0] + + if (message === '/new') { + // 重置conversation id + cloud.shared.set(genConversationKey(userName), null) + sendWxMessage('新建成功,开始新的对话吧~~', userName) + return + } + + sendWxMessage('AI思考中, 请耐心等待~~', userName) + + try { + sendDifyMessage(message, userName, (message) => { + sendWxMessage(message, userName) + }) + } + catch (e) { + console.error('[sendDifyMessage] error', e) + sendWxMessage('接口请求失败,请联系管理员查看错误信息', userName) + } +} + +export default async function (ctx: FunctionContext) { + const { query } = ctx 
+ const { msg_signature, timestamp, nonce, echostr } = query + const token = process.env.WXWORK_TOKEN + const key = process.env.WXWORK_AESKEY + console.log('[main] called', ctx.method, ctx.request.url) + + // 签名验证专用 + if (ctx.method === 'GET') { + const signature = getSignature(token, timestamp, nonce, echostr) + if (signature !== msg_signature) { + return { message: '签名验证失败', code: 401 } + } + const { message } = decrypt(key, echostr) + return message + } + + const payload = ctx.body.xml + const encrypt = payload.encrypt[0] + const signature = getSignature(token, timestamp, nonce, encrypt) + if (signature !== msg_signature) { + return { message: '签名验证失败', code: 401 } + } + + const { message } = decrypt(key, encrypt) + const { + xml + } = await xml2js.parseStringPromise(message) + // 由于GPT API耗时较久,这里提前返回,防止企业微信超时重试,后续再手动调用发消息接口 + ctx.response.sendStatus(200) + + await asyncSendMessage(xml) + + return { message: true, code: 0 } +} +``` diff --git a/zh_CN/use-cases/train-a-qa-chatbot-that-belongs-to-you.md b/zh_CN/use-cases/train-a-qa-chatbot-that-belongs-to-you.md new file mode 100644 index 0000000..d73c18b --- /dev/null +++ b/zh_CN/use-cases/train-a-qa-chatbot-that-belongs-to-you.md @@ -0,0 +1,119 @@ +# 如何训练出专属于“你”的问答机器人? + +_作者:阿乔_ + + + +_火枪击穿了专属于骑士阶层身披的铠甲与荣耀,武力对等才会有真正的平权。_ + +技术平权的速度从未来得如此之快。 + +作为兢兢业业的 CODING DevOps 项目的技术文档工程师,在为产品贡献数百篇文档后,依然止不住要在各种用户群 / 技术支持群里为客户解答问题。即使在每条答复中贴上文档链接,依然架不住新用户反复提问。(没办法,DevOps 产品中间又包含多项子产品) + +_注:CODING DevOps 是腾讯云旗下的一站式研发管理平台及云原生开发工具,旨在让软件研发如同工业生产般简单高效,助力提升企业研发管理效能。_ + +虽然由 ChatGPT 掀起 AI 革命已来,但是它并不能够回答“我的”问题,也不能回答“你的”问题。但它却可以回答“大家”的问题。 + +为什么?ChatGPT 本身便是由庞大的互联网语料库训练而成,并且这部分训练数据截止于 2021 年 9 月。如果你的产品在这个日期之前就有幸在互联网中占有一席之地,那恭喜你,GPT 可以大概率可以回答出你的产品是大概是干嘛用的。因为它会从你的官网收集已有的描述语料。具体到单独的产品或公司的细节时它便开始靠胡言乱语来敷衍你了。 + +例如这样 🔽 + +
+ +这里的“胡言乱语”指的是它会用其他不相关的概念进行回答,并且**你并不能够预期什么问题会有什么回答,这在商业场景下十分致命。**在大多数情况下,ChatGPT 对于一些带有**公理性质**的知识回答是没有问题的,作为一个可以日常侃大山闲聊的对象依然是合格的,可是你却看着它的强大的学习和理解能力望洋兴叹。 + +**“你这个孙悟空什么时候才能懂我,给我想要的回答?”** + +这也是为什么说它能够回应大家的问题,但是缺不能够回答出“我”想要的问题。 + + + +### **改变发生** + +直到,Dify 横空出世。 + +你可以简单地将 Dify 理解为是一个能够帮助你进行数据清理和文档训练的地方。咱们也给狗屁通(GPT)补补课,手动给它补上缺失的语料。把它从通用型聊天机器人,变成你的专业问答机器人。是的,从侃大山到对答如流,这中间的鸿沟不是一点半点,需要强大的理解、归纳总结能力。不过这正是狗屁通(GPT)的强项。 + +### + +### **注册并生成应用** + +现在,我们打开 Dify 并注册一个聊天机器人,我们在这里选择“对话型应用”。 + +
+ +Duang,一个聊天机器人就做好了。 + +
+ +访问这个链接就可以看到一个聊天机器人了。但是,它现在也只是一个**通用型聊天机器人**,离商业可用的**专用型问答机器人**还很差一个语料的距离。 + +
+ +### + +### **准备语料** + +Dify 提供了一个名叫“数据集”的功能,这里就是我们上传语料的地方。 + +
+ +现在点击“创建数据集”选项,上传你的文本内容。 + +
+ +目前 Dify 仅支持上传单个文件,文件格式支持 TXT、HTML、MarkDown、PDF。 + +_是的没错,你也可以把电子书上传进来,用 AI 帮助你提炼书本内容。_ + +在正式开始训练前,我们得先把教材准备好。好在 CODING 之前的所有文档皆以 Git 代码仓库 + Markdown 的形式进行协作,因此我只需要先把文档先拉到本地中。不过我现在碰到了第一个问题,那就是如何把仓库中的这一系列 .md 文档文件合并至同一个文件中? + +
+ +不怕,让 AI 给我们写一段 python 吧。 + +
+ +试着运行这个脚本,oh,输入路径后就成功生成了一篇总合文档!如果你想了解如何让 AI 给你写代码,不妨阅读[《如何用狗屁通(GPT )解决一个套娃问题》](http://mp.weixin.qq.com/s?__biz=MzU2Njg1NDA3Mw==\&mid=2247484248\&idx=1\&sn=50809b40f520c767483e1a7b0eefb9c1\&chksm=fca76b8ecbd0e298e627140d63e7b3383d226ab293a2e8fefa04b5a1ee12f187520560ec1579\&scene=21#wechat_redirect)。 + +
+ +### + +### **开始训练** + +现在要做的事,就是上传这个文件并静待训练成果了。 + +
+ +数据自动分段与清洗完成后,在刚创建的应用设置中的上下文,关联这一篇文档。为了保证响应速度和成本控制,这里直接用 3.5 模型。 + +
+ +### + +### **训练成功** + +现在再去和这个机器人互动,看看它会回答什么? + +
+ +好了,你已经是个成熟的问答机器人了,可以回答更加复杂的产品问题了。 + +
+ +
+ +
+ +
+ +这个问答机器人花了多久做出来,成本如何? + +整理语料+训练完成控制在半个小时以内,训练成本 0.27 美元。 + +现在,AI 可以深入到产品内部,变成妥妥的产品专家。只要有优秀的文档,所有公司都可以打造出专用型问答机器人。 + +武力对等才会有真正的平权。如果你对我的实践过程同样感兴趣,可以来体验一下我做好的机器人,地址:[https://udify.app/chat/F2Y4bKEWbuCb1FTC](https://udify.app/chat/F2Y4bKEWbuCb1FTC) + +不妨也来 Dify 平台训练属于你自己的机器人。做一个考试问答机器人?出题机器人? diff --git a/zh_CN/user-agreement/privacy-policy.md b/zh_CN/user-agreement/privacy-policy.md new file mode 100644 index 0000000..7a4f045 --- /dev/null +++ b/zh_CN/user-agreement/privacy-policy.md @@ -0,0 +1,128 @@ +# 隐私协议 + +欢迎来到 LangGenius 旗下网站服务 Dify.ai。LangGenius,Inc.(以下简称“LangGenius”,"Dify"、“Dify.AI”、“我们”,“我们的”或“我们”)尊重您的隐私,并致力于保护您的个人信息。本隐私政策旨在解释当您使用 Dify 服务时,我们如何收集、使用、保护和共享您提供的信息。 + +在使用或通过服务提交任何信息之前,请仔细阅读本政策。如果您不同意本政策,请不要访问或使用我们的服务或与我们业务的任何其他方面进行互动。除非您所在居住地的法律另有规定,否则通过使用我们的服务,您接受本政策中所述的隐私惯例。 + +### **我们收集关于您的哪些信息?** + +我们通过我们的网站、使用我们的产品和其他方式直接收集和存储您提供给我们的个人信息。 + +**您向我们提供的信息** + +我们将直接通过我们的网站收集和存储您直接向我们提供的个人信息,同时也通过其他方式收集和存储(例如通过用户支持请求、通过社交媒体互动、参与调查或促销活动、申请工作以及在我们的网站和活动中互动)。我们收集的信息包括但不限于以下内容: + +* 账户和个人资料信息。当您注册账户、创建或修改个人资料、设置偏好、通过服务注册或购买产品时,我们会收集关于您的信息,包括但不限于您的姓名、业务电话号码和电子邮件地址、密码以及用于身份验证和账户访问的类似安全信息。您还可以选择向我们提供显示名称、个人资料照片、职务和其他详情,以便在我们的服务中显示您的个人资料信息。 +* 您通过我们的产品提供的内容:作为服务的一部分,我们收集和存储您通过我们的产品发布、发送、接收和共享的内容。这包括您在我们产品的任何“自由文本”框中输入的任何数据,以及您上传到服务的文件和链接。我们收集和存储的内容示例包括:您在 Dify.AI 中创建的应用程序、与应用程序相关命令的描述、访问应用程序的链接、应用程序的隐私政策链接或您提供的任何其他信息。 +* 通过社区平台、即时消息工具或我们的网站提供的内容:我们还会收集您向我们提交的用于运营网站渠道(例如社交媒体或社交网络网站)的其他内容。例如,当您提供反馈或参与任何互动功能、调查、竞赛、促销活动、抽奖、活动或活动时,您通过电话、社区互动、即时消息服务等向我们提供内容(例如 GitHub、Twitter、Discord、微信、Slack 等)。 +* 通过我们的支持通道提供的信息:通过我们的用户支持,您可以选择提交有关使用我们的服务遇到的任何问题的信息。您可以通过电子邮件、第三方即时消息工具直接与我们的支持团队进行联系。我们将要求您提供联系信息、您面临的问题摘要以及任何其他可能有助于解决问题的文档、截图或信息。 +* 付款和账单信息:当您在 Dify.AI 上使用某些付费服务时,我们会收集您的付款和账单信息。您还可能需要向第三方安全支付处理服务提供商(例如 Stripe)提供信用卡信息。我们不存储您的信用卡信息。 + +**您使用服务时我们自动收集的信息** + +当您使用我们的服务(包括浏览我们的网站和在服务中执行某些操作)时,我们可能会收集有关您的信息。 + +* 您使用服务的情况:当您访问我们的任何服务并与之互动时,我们可能会跟踪有关您的某些信息,包括但不限于您使用的功能、您点击的链接、您上传到服务的附件的类型、大小和文件名以及您如何与或点击我们的产品服务。 +* 
设备和连接信息:我们收集有关您用于访问我们的服务的设备的信息,例如您的计算机、手机、平板电脑或其他设备。这种设备信息还包括您在安装、访问、更新或使用我们的服务时的连接类型和设置。我们还通过您的设备收集有关您的操作系统、浏览器类型、引荐/退出页面的 URL、IP 地址、设备标识符和崩溃数据的信息。 +* 地理位置数据:根据您的设备设置,当您访问我们的网站和使用我们的产品时,我们可能会收集地理位置数据。例如,我们可能使用您的 IP 地址推断您的大致位置。 +* Cookie 和其他跟踪技术:我们和我们的第三方合作伙伴,例如我们的广告和分析合作伙伴,使用各种常见技术提供功能并在不同服务和设备之间识别您。此类技术通常包括跟踪像素、JavaScript 和各种“本地存储数据”技术,例如 Cookie 和本地存储。根据我们使用的技术,此类数据可能包括文本、个人信息(例如您的IP地址)以及有关您如何使用我们的服务的信息。为了本政策的目的,我们将这里识别的 Cookie 和其他技术统称为“Cookie”。大多数网络浏览器都有阻止 Cookie 的功能。您也可以选择清除计算机上存储的所有 Cookie。 + +**我们从其他来源收到的信息** + +我们从其他服务用户、我们的合作伙伴和第三方服务提供商、社交媒体平台和公共数据库收到有关您的信息。我们可能将此信息与通过其他方式收集的信息相结合。这有助于我们更新和改进我们的记录、识别新客户、创建更个性化的广告并推荐您可能感兴趣的服务。当要求提供个人信息时,您可以拒绝。但是,如果您选择不提供某些产品所需的信息,则这些产品或其某些功能可能无法使用或无法正常运行。 + +我们不对任何第三方的数据政策和程序或内容负责。我们建议您查看您访问的每个网站的隐私政策。 + +### **我们如何使用我们收集的信息?** + +我们收集和处理有关您的个人信息,因为需要提供您使用的产品、运营我们的网站和业务、履行我们的合同和法律义务、保护我们的系统和客户的安全或履行本隐私政策和向您发出通知的其他合法利益。 + +例如,我们可能会使用上述任何个人信息类别的任何一种来: + +* 操作、维护和改进我们的内部运营、系统、网站和产品。 +* 了解您和您的喜好,以增强您使用我们的网站和产品的体验和乐趣,提供建议,征求反馈并更好地市场和广告。 +* 监视和分析用户与我们的网站和产品的互动,以确定趋势、使用情况和活动模式。 +* 回答您的评论和问题并提供技术支持或客户服务。 +* 提供并交付您请求的产品。 +* 遵守适用法律、规则或法规并合作和捍卫法律索赔和审计。 +* 与您沟通有关 Dify.AI 和我们的合作伙伴提供的产品和服务的促销、即将举行的活动和其他新闻。 +* 计划和举办公司活动。 +* 保护网站和产品,并调查和防止欺诈、未经授权或非法活动。 + +我们还可能根据您提供信息的目的以及经过您同意的任何其他目的使用此类信息。 + +### **我们如何共享我们收集的信息?** + +我们可能会在您的同意下共享您的个人信息。我们还可能共享上述任何个人信息类别: + +* 与我们的业务合作伙伴和其他第三方服务提供商共享。我们与帮助我们运营、提供、改进、整合、定制、支持和营销我们的服务的第三方共享信息。例如,为向您提供服务,我们可能会与提供咨询支持的第三方服务合作伙伴共享信息。我们与提供网站和应用程序开发、托管、维护、备份、存储、虚拟基础设施、支付处理、分析和其他服务的第三方服务提供商合作。此类服务可能需要服务提供商访问或使用有关您的信息。如果服务提供商需要访问有关您的信息以代表我们执行服务,则他们将在我们的密切指导下执行此操作,并采用适当的安全和保密程序来保护您的信息。 +* 与潜在买家和顾问共享信息。如果公司出售、合并、重组、解散、类似事件或采取预期发生此类事件的措施(例如交易的尽职调查),则根据适用法律,您的个人信息可能(根据适用法律)与我们的顾问和任何潜在买家的顾问共享,并转移到业务的新所有者。 +* 共享信息以遵守法律法规。我们可能会根据法律或传票要求共享信息,或者如果我们合理地认为采取此类行动是必要的,以遵守适用法律或执法机构的合理要求、执行我们的服务条款或保护我们网站和产品的安全或完整性,或行使或保护我们的客户、用户或其他人的权利、财产或个人安全。 + +### **我们如何存储和保护我们收集的信息?** + +存储和处理。通过我们的网站和我们的产品收集的信息可能存储和处理在 LangGenius 
或其关联公司或服务提供商维护设施所在的任何国家/地区,包括您所在的地区、美国、澳大利亚、加拿大、中国和欧洲经济区域(包括英国)。我们选择处理位置是为了确保高效运营、提高性能并创建冗余以保护数据在发生中断或其他问题时的安全。我们采取措施确保我们根据本隐私政策收集的数据是根据本隐私政策和适用法律进行处理的,无论数据所在的位置如何。 + +国际数据传输。当我们将个人信息从欧洲经济区(包括英国)和瑞士转移到欧洲委员会未确定其法律提供充分数据保护的美国或其他国家/地区时,我们使用旨在帮助确保您的权利和保护的法律机制,包括合同。具体而言,我们的网站服务器位于美国,我们的附属公司、合作伙伴、第三方和服务提供商在美国、欧洲经济区和中国运营。这意味着当我们收集您的个人信息时,我们可能在这些国家中处理它。但是,我们已采取适当的保障措施,要求您的个人信息按照本隐私政策受到保护。LangGenius 主要依赖欧洲委员会批准的数据保护标准合同条款作为主要保护措施。有关这些机制的更多信息,请使用下面“如何联系我们”部分提供的详细联系信息与我们联系。 + +保护您的信息安全。LangGenius 关心您的信息安全,并采取合理和适当的技术和组织措施,旨在防止个人信息的丢失、滥用和未经授权的访问、披露、更改和破坏。然而,没有任何安全系统是绝对安全的,我们无法保证我们的系统或您的信息的安全。 + +处理个人信息的合法基础(仅限欧洲经济区)。LangGenius 是您信息的数据控制者。 + +以下部分特别为您提供,如果您位于欧洲经济区(EEA)、英国或瑞士。 + +我们收集和使用上述个人信息的合法基础将取决于个人信息的相关性以及我们收集它的具体上下文。然而,我们通常只会在以下情况下收集个人信息:我们获得您的同意,我们需要个人信息以履行与您的合同,或者处理是基于我们的合法利益,且不会被您的数据保护利益或基本权利和自由所覆盖。在某些情况下,我们还可能有法律义务从您那里收集个人信息。 + +如果我们要求您提供个人信息以遵守法律要求或与您执行合同,我们将在相关时间指示这一点,并告知您是否必须提供您的个人信息(以及如果您不提供您的个人信息可能会产生的可能后果)。同样,如果我们在依靠我们的合法利益(或第三方的合法利益)收集和使用您的个人信息,我们将在相关时间向您指示这些合法利益是什么。 + +如果您对处理的合法基础有疑问或想了解更多信息,请使用下面“如何联系我们”部分提供的详细联系信息与我们联系。 + +保留。我们保留个人信息,只要处理个人信息的目的需要,以及为我们遵守适用法律所必需的更长时间。例如,我们将保留您的帐户信息,只要您的帐户处于活动状态或需要为您提供您请求或授权的产品,包括维护和改善产品性能以及保护系统安全。我们还保留个人数据,因为需要维护适当的业务和财务记录,保护我们的法律利益,解决争议或遵守法律或法规要求。此后,我们将删除或使其匿名化,或者如果不可能(例如,因为您的个人信息已存储在备份归档中),则将使用适当的安全措施存储您的个人信息,并采取适当的步骤来将其与进一步处理隔离,直到可以删除。 + +### **如何访问和控制您的信息?** + +根据适用法律,您对个人信息享有某些权利。这些包括以下权利: + +* 访问您的个人信息:您有权要求我们确认我们是否正在处理您的个人信息,并在这种情况下,访问个人信息并获得有关如何处理您的数据的信息,以及要求我们提供您的个人信息副本。 +* 更正您的个人信息:您有权更正我们持有的有关您的任何不正确、不完整或不准确的数据。 +* 删除您的个人信息:您有权在以下情况下要求我们删除您的个人信息,例如,我们持有的数据不再需要,或者您的数据已被非法处理。 +* 反对处理:您有权反对处理您的个人信息,并要求我们停止处理您的个人信息,如果例如,这些数据正在被处理用于直接营销或者我们正在依靠合法利益(或第三方的利益)。在某些情况下,我们可能需要证明我们有压倒性的合法理由来处理您的信息,这些理由优先于您的权利和自由。 +* 限制处理:您有权要求我们暂停处理您的个人信息,在以下情况下:(a)如果您希望我们确定个人信息的准确性;(b)如果我们使用数据是非法的,但您不希望它被删除;(c)如果您需要我们继续持有数据,即使我们不再需要它,因为您需要它来建立、行使或捍卫法律权利;或(d)如果您反对我们使用您的数据,但我们需要验证我们是否有优先的合法理由使用它。 +* 以可用的电子格式接收您的个人信息并将其传输给第三方(数据可移植权):如果我们根据您的同意或合同处理您的个人信息,您可以要求以结构化、通用和机器可读的格式接收您的个人信息。在没有我们任何障碍的情况下,您还可以要求我们将这些数据传输给另一个控制者。 +* 
撤回同意:如果我们依靠您的同意来处理您的个人信息,您有权随时撤回您的同意。但是,这不会影响在您撤回您的同意之前进行的任何处理的合法性。如果您撤回您的同意,我们将不再处理该个人信息,但我们可能无法继续向您提供某些产品或服务,这些产品或服务是为了寻求个人信息而寻求的。在您撤回同意时,我们将告知您是否属于这种情况。 +* 退出通信:通过在每封电子邮件中使用取消订阅链接,在您的服务帐户设置菜单中更新您的电子邮件首选项,或通过如下所述的联系我们,您可以选择退出接收我们的推广通信。即使在您退出接收我们的推广信息后,您仍将继续收到我们的服务相关的交易性消息。您可以在帐户设置中选择退出某些通知消息。请注意,您仍将继续收到通用广告。 +* 发送“不跟踪”信号:一些浏览器已经集成了“不跟踪”(DNT)功能,这些功能可以向您访问的网站发送信号,指示您不希望被跟踪。由于尚未对如何解释 DNT 信号达成共识,因此我们的服务目前不会响应浏览器 DNT 信号。您可以使用我们提供的其他工具范围来控制数据收集和使用,包括选择退出接收我们如上所述的营销。 + +在某些情况下,这些权利可能会受到限制,例如,我们可以证明我们有法律要求处理您的数据(例如,税务机构要求我们保留数据)或者它是需要履行合同的。在某些情况下,这可能意味着即使您撤回同意,我们也能够保留数据。 + +如果管理员为您管理服务(请参见下面“终端用户通知”),您可能需要首先联系您的管理员以协助您的请求。对于所有其他请求,请通过下面“如何联系我们”部分提供的详细联系信息与我们联系。如果您有未解决的问题,您可能有权向您居住的国家/地区的数据保护机构投诉,或者您工作或感到您的权利受到侵害的国家/地区的数据保护机构投诉。 + +如果您是加利福尼亚州居民,请您仔细阅读下面的“加利福尼亚要求”以了解您的权利和其他重要信息。 + +### **其他重要隐私信息** + +**加利福尼亚要求** + +如果您是加利福尼亚居民,根据《加利福尼亚消费者保护法》(“CCPA”),您可能享有其他权利。除了上述“如何访问和控制您的信息”中列出的权利外,我们将在以下段落中向您解释如何行使您在 CCPA 下的权利: + +* 反对歧视权利:您有权不因行使本节所述任何权利而遭受歧视。我们不会因您行使知情权、删除权或选择退出销售权而歧视您。 +* 选择退出销售权:您有权选择退出销售您的个人信息。我们不会出售您的个人信息。CCPA 下的“个人信息”和“销售”术语被广泛定义,因此分享与您相关的标识符以获得利益可能被视为销售。您有权了解我们出售有关您的个人信息的类型以及我们与之共享此类信息的第三方的类型。 +* 处理您的信息:本政策概述了我们可能收集的个人信息类型、这些信息的来源,以及有关删除和保留的规则。我们还包括有关我们如何处理您的信息的信息,包括 CCPA 定义的“业务目的”,如保护非法活动,以及开发新产品、功能和技术。如果您对我们可能收集有关您的信息的类别有疑问,请参阅本政策的“我们收集有关您的哪些信息”部分。您还可以参考“我们如何使用我们收集的信息”部分,以获取有关我们处理活动的更多详细信息。 + +### **我们对儿童的政策** + +我们的服务不面向 18 岁以下的儿童,我们也不会故意收集 18 岁以下儿童的个人信息。如果我们得知 18 岁以下儿童向我们提供了个人信息,我们将立即从系统中删除此类个人数据。如果您得知或有理由相信儿童通过我们的服务向我们提供了个人信息,请在下面“如何联系我们”部分提供的详细信息中与我们联系,我们将从我们的数据库中删除该信息。 + +**我们政策的更改** + +我们可能随时修改此政策,无需事先通知,更改可能适用于我们已经持有的任何个人信息,以及政策修改后收集的任何新个人信息。如果我们进行更改,我们将通过修改此政策顶部的日期来通知您。如果我们对如何收集、使用或披露您的个人信息进行任何重大更改,将提供更加高级和突出的通知,以影响您在此政策下的权利。除非您所在地的法律另有规定,否则在收到更改通知后,您继续访问或使用我们的服务,即表示您确认接受更新的政策。 + +此外,我们可能会向您提供有关我们的服务特定部分的个人信息处理做法的实时披露或其他信息。此类通知可能补充本政策或为您提供有关我们如何处理您的个人信息的其他选择。如果您不同意本政策的任何更改,您需要停止使用服务并注销您的帐户,如上所述。 + +### **如何联系我们?** + +您的信息由 LangGenius 控制。如果您对如何处理您的信息有疑问或关注,请直接向负责协调此类查询的 LangGenius 提出查询。 + +LangGenius,Inc.,美国特拉华州注册公司(文件编号 7358523)。 + 
+电子邮件:hello@dify.ai. diff --git a/zh_CN/user-agreement/terms-of-service.md b/zh_CN/user-agreement/terms-of-service.md new file mode 100644 index 0000000..34a095c --- /dev/null +++ b/zh_CN/user-agreement/terms-of-service.md @@ -0,0 +1,25 @@ +# 服务协议 + +以下文件说明了我们网站和服务的使用条件。 + +LangGenius, Inc.(“LangGenius”)是一家美国公司,注册办公地址位于美国特拉华州北布罗德街 651 号 201 室,文件号 7358523,由其 CEO 张路宇先生代表。 + +LangGenius 很高兴为您提供访问 Dify 网站([dify.ai](https://dify.ai/))(以下简称“网站”、“Dify”或“Dify.AI”)和相关应用程序和资源(统称为“服务”)的机会。您的服务使用受制于下面的具有约束力的法律协议(“条款”)。 + +### **Beta软件** + +我们可能会提供您使用 beta 和实验性产品,功能和文档(“Beta 软件”)以便进行早期访问。 Beta 软件不是普遍可用的,可能包含错误,缺陷和不准确之处。我们提供 Beta 软件“按原样”无任何保证,并且可能随时终止 Beta 软件而不保证Beta软件数据的保存。我们的服务级别协议不适用于 Beta 软件。如果 Beta 软件变得普遍可用,则您可以选择支付软件费用或停止使用 Beta 软件。我们可能会利用您对 Beta 软件的反馈。 + +### **Dify 服务的使用** + +您可以使用 Dify.AI 开发平台为您的目标用户群创建软件应用程序,以实现商业目标。 您了解并认可基于Dify.AI 创建的软件应用程序所产生的语言和信息是源于用户编写的提示和其大型语言模型(LLM)提供者(例如 OpenAI)。 LLM 是一项不成熟的技术,由于商业化应用无法满足您的运营预期(包括但不限于未达到您的广告描述预期,目标效果描述预期等等)或者遇到服务中断等问题而产生的任何负面后果均与 LangGenius 无关。 + +对 LLM 模型的提示开发是 Dify 提供的核心产品能力和服务。 在您使用 Dify 进行 LLM 模型的提示工程、嵌入、微调和测试期间,可能会发生附加资源消耗。 注册或开始使用 Dify 时,您同意该产品的 LLM 算法可能会消耗资源和产生成本。 您应密切监视来自第三方 LLM 提供商的计费变化。 您应承担所有异常消耗和计费问题的后果,这可能会阻止您正常使用 Dify 服务。 + +### **知识产权** + +服务、标志、界面、照片、网站归 LangGenius 或其许可方所有,受适用法律保护,保留所有权利。 LangGenius 非常重视知识产权,包括版权的保护。如果您侵犯或盗用他人的知识产权,包括版权,LangGenius 将终止您对网站服务的访问或使用,而无需通知您。 + +### **适用法律** + +本条款受美国加利福尼亚州法律管辖。 diff --git a/zh_CN/web-application/conversation-application.md b/zh_CN/web-application/conversation-application.md new file mode 100644 index 0000000..68b2312 --- /dev/null +++ b/zh_CN/web-application/conversation-application.md @@ -0,0 +1,50 @@ +# 对话型应用 + +对话型应用采用一问一答模式与用户持续对话。对话型应用支持以下能力(请确认应用编排时有开启以下功能): + +* 对话前填写的变量。 +* 对话的创建、置顶、删除。 +* 对话开场白。 +* 下一步问题建议。 +* 语音转文字。 + +### 对话前填写的变量 + +如你在应用编排时有设置变量的填写要求,则在对话前需要按提示填写信息才可进入对话窗口: + +
+ +填写必要内容,点击 “开始对话” 按钮,开始聊天。 + +
+ +移动到 AI 的回答上,可以复制会话内容,给回答 “赞” 和 “踩”。 + +
+ +### 对话的创建、置顶和删除 + +点击 “新对话” 按钮开始一个新的对话。移动到一个会话上,可以对会话进行 “置顶” 和 “删除” 操作。 + +
+ +### 对话开场白 + +若在应用编排时开启了「对话开场白」功能,则在创建一个新对话时 AI 应用会自动发起第一句对话: + +
+ +### 下一步问题建议 + +若在应用编排时开启了「下一步问题建议」功能,则在对话后系统自动生成 3 个相关问题建议: + +
+ +### 语音转文字 + +若在应用编排时开启了「语音转文字」功能,则在 Web 应用端的输入框看到语音输入的图标,点击图标即可语音输入转成文字: + +_请注意确保你使用的设备环境已经授权使用麦克风。_ + +
+ diff --git a/zh_CN/web-application/overview.md b/zh_CN/web-application/overview.md new file mode 100644 index 0000000..50bd043 --- /dev/null +++ b/zh_CN/web-application/overview.md @@ -0,0 +1,35 @@ +# 概览 + +Web 应用是给应用使用者用的。应用开发者在 Dify 创建一个应用,就会获得一个对应的 Web 应用。Web 应用的使用者无需登陆,即可使用。Web 应用已适配不同尺寸的设备:PC,平板和手机。 + + + +Web 应用的内容和应用发布的配置一致。当修改了应用的配置,并在应用的提示词编排页点 “发布” 按钮发布后,Web 应用的内容也会根据当前应用的配置做更新。 + + + +我们可以在应用概览页开启和关闭对 Web 应用的访问,以及修改 Web 应用的站点信息: + +* 图标 +* 名称 +* 应用描述 +* 界面语言 +* 版权信息 +* 隐私政策链接 + +Web 应用的功能表现取决于开发者在编排应用时是否开启该功能,例如: + +* 对话开场白 +* 对话前填写的变量 +* 下一步问题建议 +* 语音转文字 +* 更多类似的答案(文本型应用) +* ...... + + + +在下面的章节,我们会分别介绍 Web 应用的两种类型: + +* 文本生成型 +* 对话型 + diff --git a/zh_CN/web-application/text-generator.md b/zh_CN/web-application/text-generator.md new file mode 100644 index 0000000..68b6309 --- /dev/null +++ b/zh_CN/web-application/text-generator.md @@ -0,0 +1,58 @@ +# 文本生成型应用 + +文本生成类应用是一种根据用户提供的提示,自动生成高质量文本的应用。它可以生成各种类型的文本,例如文章摘要、翻译等。 + + + +文本生成型应用支持如下功能: + +1. 运行一次。 +2. 批量运行。 +3. 保存运行结果。 +4. 生成更多类似结果。 + +下面我们分别来介绍。 + +### 运行一次 + +输入查询内容,点击运行按钮,右侧会生成结果,如下图所示: + +
+ +在生成的结果部分,点 “复制” 按钮可以将内容复制到剪贴板。点 “保存” 按钮可以保存内容。可以在 “已保存” 选项卡中看到保存过的内容。也可以对生成的内容点 “赞” 和 “踩”。 + +### 批量运行 + +有时,我们需要运行一个应用很多次。比如:有个 Web 应用可以根据主题来生成文章。现在要生成 100 篇不同主题的文章。那么这个任务要做 100 次,很麻烦。而且,必须等一个任务完成才能开始下一个任务。 + +上面的场景,用批量运行功能,操作便利(把主题录入一个 `csv` 文件,只需执行一次),也节约了生成的时间(多个任务同时运行)。使用方式如下: + +#### 第 1 步 进入批量运行页面 + +点击 “批量运行” 选项卡,则会进入批量运行页面。 + +
+ +#### 第 2 步 下载模版并填写内容 + +点击下载模版按钮,下载模版。编辑模版,填写内容,并另存为 `.csv` 格式的文件。 + +
+ +#### 第 3 步 上传文件并运行 + +
+ +如果需要导出生成的内容,可以点右上角的下载 “按钮” 来导出为 `csv` 文件。 + +### 保存运行结果 + +点击生成结果下面的 “保存” 按钮,可以保存运行结果。在 “已保存” 选项卡中,可以看到所有已保存的内容。 + +
+ +### 生成更多类似结果 + +如果在应用编排时开启了 “更多类似” 的功能。在 Web 应用中可以点击 “更多类似” 的按钮来生成和当前结果相似的内容。如下图所示: + +