diff --git a/en/.gitbook/assets/image (1) (1) (1) (1) (1) (1).png b/en/.gitbook/assets/image (1) (1) (1) (1) (1) (1).png
new file mode 100644
index 0000000..8c20eca
Binary files /dev/null and b/en/.gitbook/assets/image (1) (1) (1) (1) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (1) (1) (1) (1) (1).png b/en/.gitbook/assets/image (1) (1) (1) (1) (1).png
index 8c20eca..d11476b 100644
Binary files a/en/.gitbook/assets/image (1) (1) (1) (1) (1).png and b/en/.gitbook/assets/image (1) (1) (1) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (1) (1) (1) (1).png b/en/.gitbook/assets/image (1) (1) (1) (1).png
index d11476b..abbd931 100644
Binary files a/en/.gitbook/assets/image (1) (1) (1) (1).png and b/en/.gitbook/assets/image (1) (1) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (1) (1) (1).png b/en/.gitbook/assets/image (1) (1) (1).png
index abbd931..0fdfeda 100644
Binary files a/en/.gitbook/assets/image (1) (1) (1).png and b/en/.gitbook/assets/image (1) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (1) (1).png b/en/.gitbook/assets/image (1) (1).png
index 0fdfeda..3db705f 100644
Binary files a/en/.gitbook/assets/image (1) (1).png and b/en/.gitbook/assets/image (1) (1).png differ
diff --git a/en/.gitbook/assets/image (1).png b/en/.gitbook/assets/image (1).png
index 3db705f..6a3c26b 100644
Binary files a/en/.gitbook/assets/image (1).png and b/en/.gitbook/assets/image (1).png differ
diff --git a/en/.gitbook/assets/image (10).png b/en/.gitbook/assets/image (10).png
index 14e5b94..5ddefe5 100644
Binary files a/en/.gitbook/assets/image (10).png and b/en/.gitbook/assets/image (10).png differ
diff --git a/en/.gitbook/assets/image (14).png b/en/.gitbook/assets/image (14).png
index f8b59b3..3e26e1a 100644
Binary files a/en/.gitbook/assets/image (14).png and b/en/.gitbook/assets/image (14).png differ
diff --git a/en/.gitbook/assets/image (2) (1) (1) (1) (1).png b/en/.gitbook/assets/image (2) (1) (1) (1) (1).png
new file mode 100644
index 0000000..4f9324b
Binary files /dev/null and b/en/.gitbook/assets/image (2) (1) (1) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (2) (1) (1) (1).png b/en/.gitbook/assets/image (2) (1) (1) (1).png
index 4f9324b..6ea5c61 100644
Binary files a/en/.gitbook/assets/image (2) (1) (1) (1).png and b/en/.gitbook/assets/image (2) (1) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (2) (1) (1).png b/en/.gitbook/assets/image (2) (1) (1).png
index 6ea5c61..f19b0c0 100644
Binary files a/en/.gitbook/assets/image (2) (1) (1).png and b/en/.gitbook/assets/image (2) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (2) (1).png b/en/.gitbook/assets/image (2) (1).png
index f19b0c0..eddf580 100644
Binary files a/en/.gitbook/assets/image (2) (1).png and b/en/.gitbook/assets/image (2) (1).png differ
diff --git a/en/.gitbook/assets/image (2).png b/en/.gitbook/assets/image (2).png
index eddf580..6a3c26b 100644
Binary files a/en/.gitbook/assets/image (2).png and b/en/.gitbook/assets/image (2).png differ
diff --git a/en/.gitbook/assets/image (3) (1) (1) (1) (1).png b/en/.gitbook/assets/image (3) (1) (1) (1) (1).png
new file mode 100644
index 0000000..46e504d
Binary files /dev/null and b/en/.gitbook/assets/image (3) (1) (1) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (3) (1) (1) (1).png b/en/.gitbook/assets/image (3) (1) (1) (1).png
index 46e504d..24035bb 100644
Binary files a/en/.gitbook/assets/image (3) (1) (1) (1).png and b/en/.gitbook/assets/image (3) (1) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (3) (1) (1).png b/en/.gitbook/assets/image (3) (1) (1).png
index 24035bb..2b9f28f 100644
Binary files a/en/.gitbook/assets/image (3) (1) (1).png and b/en/.gitbook/assets/image (3) (1) (1).png differ
diff --git a/en/.gitbook/assets/image (3) (1).png b/en/.gitbook/assets/image (3) (1).png
index 2b9f28f..ecb5d46 100644
Binary files a/en/.gitbook/assets/image (3) (1).png and b/en/.gitbook/assets/image (3) (1).png differ
diff --git a/en/.gitbook/assets/image (3).png b/en/.gitbook/assets/image (3).png
index ecb5d46..090c776 100644
Binary files a/en/.gitbook/assets/image (3).png and b/en/.gitbook/assets/image (3).png differ
diff --git a/en/.gitbook/assets/image.png b/en/.gitbook/assets/image.png
index 090c776..4f2d7dd 100644
Binary files a/en/.gitbook/assets/image.png and b/en/.gitbook/assets/image.png differ
diff --git a/en/SUMMARY.md b/en/SUMMARY.md
index 5c91035..9586b54 100644
--- a/en/SUMMARY.md
+++ b/en/SUMMARY.md
@@ -43,6 +43,7 @@
* [Prompt Template](features/prompt-engineering/prompt-template.md)
* [Workflow](features/workflow/README.md)
* [Introduce](features/workflow/introduce.md)
+ * [Key Concept](features/workflow/key-concept.md)
* [Node](features/workflow/node/README.md)
* [Start](features/workflow/node/start.md)
* [End](features/workflow/node/end.md)
diff --git a/en/features/annotation-reply.md b/en/features/annotation-reply.md
index b4200f4..e5814d6 100644
--- a/en/features/annotation-reply.md
+++ b/en/features/annotation-reply.md
@@ -19,7 +19,7 @@ The feature provides an alternative system for enhancing retrieval, skipping the
4. Without a match, the query follows the standard LLM or RAG process.
5. Deactivating Annotation Reply ceases matching replies from the annotations.
-
Annotation Reply Process
+
Annotation Reply Process
## Activation
diff --git a/en/features/datasets/README.md b/en/features/datasets/README.md
index 3d60773..8ae8ec5 100644
--- a/en/features/datasets/README.md
+++ b/en/features/datasets/README.md
@@ -1,4 +1,4 @@
-# Knowledge\&Index
+# Knowledge Import
Most language models use outdated training data and have length limitations for the context of each request. For example, GPT-3.5 is trained on corpora from 2021 and has a limit of approximately 4k tokens per request. This means that developers who want their AI applications to be based on the latest and private context conversations must use techniques like embedding.
@@ -78,9 +78,9 @@ Modify Documents For technical reasons, if developers make the following changes
Dify support customizing the segmented and cleaned text by adding, deleting, and editing paragraphs. You can dynamically adjust your segmentation to make your knowledge more accurate. Click **Document --> paragraph --> Edit** in the knowledge to modify paragraphs content and custom keywords. Click **Document --> paragraph --> Add segment --> Add a segment** to manually add new paragraph. Or click **Document --> paragraph --> Add segment --> Batch add** to batch add new paragraph.
-
Edit
+
Edit
-
add
+
add
### Disabling and Archiving of Documents
diff --git a/en/features/datasets/sync-from-notion.md b/en/features/datasets/sync-from-notion.md
index eb906de..7d2219f 100644
--- a/en/features/datasets/sync-from-notion.md
+++ b/en/features/datasets/sync-from-notion.md
@@ -43,7 +43,7 @@ Click the " **New integration** " button, the type is Internal by default (canno
Once the integration is created, you can update its settings as needed under the **Capabilities** tab and click the "**Show**" button under **Secrets** and then copy the Secrets.
-
+
Copy it and back to the Dify source code , in the **.env** file configuration related environment variables, environment variables as follows:
@@ -57,11 +57,11 @@ Copy it and back to the Dify source code , in the **.env** file configuration re
To toggle the switch to public settings, you need to **fill in additional information in the Organization Information** form below, including your company name, website, and Retargeting URL, and click the "Submit" button.
-
+
After your integration has been successfully made public in your [integration’s settings page](https://www.notion.so/my-integrations), you will be able to access the integration’s secrets in the Secrets tab.
-
+
Back to the Dify source code , in the **.env** file configuration related environment variables , environment variables as follows:
diff --git a/en/features/retrieval-augment/README.md b/en/features/retrieval-augment/README.md
index 339c2b3..ed0ed83 100644
--- a/en/features/retrieval-augment/README.md
+++ b/en/features/retrieval-augment/README.md
@@ -8,14 +8,14 @@ Developers can utilize this technology to cost-effectively build AI-powered cust
In the diagram below, when a user asks, "Who is the President of the United States?", the system doesn't directly relay the question to the large model for an answer. Instead, it first conducts a vector search in a knowledge base (like Wikipedia, as shown in the diagram) for the user's query. It finds relevant content through semantic similarity matching (for instance, "Biden is the current 46th President of the United States…"), and then provides the user's question along with the found knowledge to the large model. This enables the model to have sufficient and complete knowledge to answer the question, thereby yielding a more reliable response.
-
Basic Architecture of RAG
+
Basic Architecture of RAG
-## Why is this necessary?
+## Why is this necessary?
-We can liken a large model to a super-expert, knowledgeable in various human domains. However, this expert has its limitations; for example, it doesn't know your personal situation, as such information is private and not publicly available on the internet, and therefore, it hasn't had the opportunity to learn it beforehand.
+We can liken a large model to a super-expert, knowledgeable in various human domains. However, this expert has its limitations; for example, it doesn't know your personal situation, as such information is private and not publicly available on the internet, and therefore, it hasn't had the opportunity to learn it beforehand.
-When you want to hire this super-expert as your family financial advisor, you need to allow them to review your investment records, household expenses, and other relevant data before they can respond to your inquiries. This enables them to provide professional advice tailored to your personal circumstances.
+When you want to hire this super-expert as your family financial advisor, you need to allow them to review your investment records, household expenses, and other relevant data before they can respond to your inquiries. This enables them to provide professional advice tailored to your personal circumstances.
-**This is what the RAG system does: it helps the large model temporarily acquire external knowledge it doesn't possess, allowing it to search for answers before responding to a question.**
+**This is what the RAG system does: it helps the large model temporarily acquire external knowledge it doesn't possess, allowing it to search for answers before responding to a question.**
Based on this example, it's evident that the most critical aspect of the RAG system is the retrieval of external knowledge. The expert's ability to provide professional financial advice depends on accurately finding the necessary information. If the expert retrieves information unrelated to financial investments, like a family weight loss plan, even the most capable expert would be ineffective.
diff --git a/en/features/retrieval-augment/hybrid-search.md b/en/features/retrieval-augment/hybrid-search.md
index f4407b8..298f37e 100644
--- a/en/features/retrieval-augment/hybrid-search.md
+++ b/en/features/retrieval-augment/hybrid-search.md
@@ -29,13 +29,13 @@ In most text search scenarios, it's crucial to ensure that the most relevant res
In Hybrid Search, vector and keyword indices are pre-established in the database. Upon user query input, the system searches for the most relevant text in documents using both search methods.
-
Hybrid Search
+
Hybrid Search
"Hybrid Search" doesn't have a definitive definition; this article exemplifies it as a combination of Vector Search and Keyword Search. However, the term can also apply to other combinations of search algorithms. For instance, we could combine knowledge graph technology, used for retrieving entity relationships, with Vector Search.
Different search systems each excel at uncovering various subtle connections within texts (paragraphs, sentences, words), including precise relationships, semantic relationships, thematic relationships, structural relationships, entity relationships, temporal relationships, and event relationships. It's safe to say that no single search mode is suitable for all scenarios. **Hybrid Search, by integrating multiple search systems, achieves a complementarity among various search technologies.**
-## Vector Search
+## Vector Search
Definition: Vector Search involves generating query embeddings and then searching for text chunks that most closely match these embeddings in terms of vector representation.
@@ -47,7 +47,7 @@ Definition: Vector Search involves generating query embeddings and then searchin
**Rerank Model:** After configuring the Rerank model's API key on the "Model Provider" page, you can enable the "Rerank Model" in the search settings. The system then performs a semantic re-ranking of the document results that have been recalled after semantic search, optimizing the order of these results. Once the Rerank model is set up, the TopK and Score threshold settings are only effective in the Rerank step.
-## Full-Text Search
+## Full-Text Search
Definition: Full-Text Search involves indexing all the words in a document, enabling users to query any term and retrieve text chunks that contain these terms.
@@ -67,7 +67,7 @@ Hybrid Search operates by concurrently executing Full-Text Search and Vector Sea
**Rerank Model:** After configuring the Rerank model's API key on the "Model Supplier" page, you can enable the "Rerank Model" in the search settings. The system will perform a semantic re-ranking of the document results retrieved through hybrid search, thereby optimizing the order of these results. Once the Rerank model is set up, the TopK and any Score threshold settings are only applicable during the Rerank step.
-## Setting the Search Mode When Creating a Knowledge
+## Setting the Search Mode When Creating a Knowledge
To set the search mode when creating a knowledge base, navigate to the "Knowledge -> Create Knowledge" page. There, you can configure different search modes in the retrieval settings section.
diff --git a/en/features/workflow/introduce.md b/en/features/workflow/introduce.md
index 3ba7746..e8efa04 100644
--- a/en/features/workflow/introduce.md
+++ b/en/features/workflow/introduce.md
@@ -1,2 +1,35 @@
-# Concept
+# Introduction
+### Introduction
+
+Workflow reduces system complexity by breaking complex tasks into smaller steps (nodes), reducing dependence on prompt word technology and model inference capabilities, enhancing the performance of LLM applications for complex tasks, and improving system explainability, stability, and fault tolerance. Dify workflows are divided into two types based on application scenarios:
+
+* **Chatflow**: For conversational scenarios, including customer service, semantic search, and other conversational applications that require multi-step logic in building responses.
+* **Workflow**: For automation and batch processing scenarios, suitable for high-quality translation, data analysis, content creation, email automation, etc.
+
+To address the complexity of user intent recognition in natural language inputs, Chatflow provides problem understanding nodes, such as question classification, question rewriting, sub-question splitting, etc. In addition, it will also provide LLM with the ability to interact with the external environment, i.e., tool invocation capability, such as online search, mathematical calculation, weather query, drawing, etc.
+
+
+
+To solve complex business logic in automation and batch processing scenarios, Workflow provides a wealth of logic nodes, such as code nodes, IF/ELSE nodes, merge nodes, template conversion nodes, etc. In addition, it will also provide the ability to trigger by time and event, facilitating the construction of automated processes.
+
+
+
+### Common Cases
+
+**Customer Service** By integrating LLM into your customer service system, you can automate the answering of common questions, reducing the workload of the support team. LLM can understand the context and intent of customer queries and generate helpful and accurate responses in real-time.
+
+**Content Generation** Whether you need to create blog posts, product descriptions, or marketing materials, LLM can assist you by generating high-quality content. Just provide an outline or topic, and LLM will use its extensive knowledge base to produce engaging, informative, and well-structured content.
+
+**Task Automation** Can be integrated with various task management systems, such as Trello, Slack, Lark, to automate project and task management. By using natural language processing, LLM can understand and interpret user inputs, create tasks, update statuses, and assign priorities without manual intervention.
+
+**Data Analysis and Reporting** Can be used to analyze large datasets and generate reports or summaries. By providing relevant information to LLM, it can identify trends, patterns, and insights, transforming raw data into actionable intelligence. This is especially valuable for businesses that wish to make data-driven decisions.
+
+**Email Automation** LLM can be used to draft emails, social media updates, and other forms of communication. By providing a brief outline or key points, LLM can generate a well-structured, coherent, and contextually relevant message. This can save a significant amount of time and ensure your responses are clear and professional.
+
+### How to Start
+
+* Start building from a blank workflow or use system templates to help you start.
+* Familiarize yourself with basic operations, including creating nodes on the canvas, connecting and configuring nodes, debugging workflows, viewing run history, etc.
+* Save and publish a workflow.
+* Run the published application or call the workflow through an API.
diff --git a/en/features/workflow/key-concept.md b/en/features/workflow/key-concept.md
new file mode 100644
index 0000000..5c8ba23
--- /dev/null
+++ b/en/features/workflow/key-concept.md
@@ -0,0 +1,49 @@
+# Key Concept
+
+### Node
+
+Nodes are the key components of a workflow. By connecting nodes with different functionalities, a series of operations within the workflow are executed. Nodes are categorized by type:
+
+* Basic Nodes: Start, End, Answer, LLM, Knowledge Retrieval, Applications (coming soon)
+* Question Understanding: Question Classifier, Question Rewriting (coming soon), Sub-question Splitting (coming soon)
+* Logic Processing: IF/ELSE, Merge (coming soon), Loop (coming soon)
+* Transformation: Code, Template, Variable Assigner, Function Extraction (coming soon)
+* Others: HTTP Request
+* Tools: Built-in Tools, Custom Tools
+
+### Variables
+
+Variables are crucial for linking the input and output of nodes within a workflow, facilitating the implementation of complex processing logic throughout the process.
+
+* Workflows need to define input variables for initiating execution or conversation.
+* Nodes require input variables for initiation; for instance, the input variable for a question classifier typically consists of the user's question.
+* Variables referenced within a node can only be those from preceding process nodes to ensure coherence and avoid duplication.
+* To prevent variable name duplication, node names must be unique.
+* The output variables of a node are fixed by the system and are not subject to modification.
+
+### Differences between Chatflow and Workflow
+
+**Application Scenario Differences**
+
+* **Chatflow**: Targets conversational scenarios and represents an advanced orchestration mode for Chatbot application types.
+* **Workflow**: Geared towards automation and batch processing scenarios.
+
+**Differences in Nodes**
+
+| **Node** | **Chatflow** | **Workflow** |
+| ------------------- | --------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
+| Start | Utilizes system-built variables for user input and file uploads | Utilizes system-built variables for file uploads |
+| End |
Not support End node
| Uses an End node to output structured text at the conclusion of execution, which is not designed for mid-process output. |
+| Answer | The Answer node is used for streaming output or fixed text replies and can be utilized mid-process. | Not support Answer node |
+| LLM | Memory is automatically enabled to store and pass on the history of multi-turn dialogues. |
Not support Memory configuration
|
+| Question Classifier | Memory is automatically enabled to store and pass on the history of multi-turn dialogues. | Not support Memory configuration |
+
+#### Application Entry Division
+
+* **Chatflow Entry**:
+
+
+
+* **Workflow Entry**:
+
+
diff --git a/en/tutorials/model-configuration/xinference.md b/en/tutorials/model-configuration/xinference.md
index 7ab6729..1060589 100644
--- a/en/tutorials/model-configuration/xinference.md
+++ b/en/tutorials/model-configuration/xinference.md
@@ -27,18 +27,18 @@ There are two ways to deploy Xinference, namely [local deployment](https://githu
```
Xinference will start a worker locally by default, with the endpoint: `http://127.0.0.1:9997`, and the default port is `9997`. By default, access is limited to the local machine only, but it can be configured with `-H 0.0.0.0` to allow access from any non-local client. To modify the host or port, you can refer to xinference's help information: `xinference-local --help`.
+
> If you use the Dify Docker deployment method, you need to pay attention to the network configuration to ensure that the Dify container can access the endpoint of Xinference. The Dify container cannot access localhost inside, and you need to use the host IP address.
-
3. Create and deploy the model
- Visit `http://127.0.0.1:9997`, select the model and specification you need to deploy, as shown below:
+ Visit `http://127.0.0.1:9997`, select the model and specification you need to deploy, as shown below:
-
+
As different models have different compatibility on different hardware platforms, please refer to [Xinference built-in models](https://inference.readthedocs.io/en/latest/models/builtin/index.html) to ensure the created model supports the current hardware platform.
4. Obtain the model UID
- Copy model ID from `Running Models` page, such as: `2c886330-8849-11ee-9518-43b0b8f40bea`
+ Copy model ID from `Running Models` page, such as: `2c886330-8849-11ee-9518-43b0b8f40bea`
5. After the model is deployed, connect the deployed model in Dify.
In `Settings > Model Providers > Xinference`, enter:
diff --git a/en/user-guide/creating-dify-apps/prompt-engineering/conversation-application.md b/en/user-guide/creating-dify-apps/prompt-engineering/conversation-application.md
index 69a8d0a..8e58b5b 100644
--- a/en/user-guide/creating-dify-apps/prompt-engineering/conversation-application.md
+++ b/en/user-guide/creating-dify-apps/prompt-engineering/conversation-application.md
@@ -22,7 +22,7 @@ Click the "Create Application" button on the homepage to create an application.
After the application is successfully created, it will automatically redirect to the application overview page. Click on the left-hand menu: “**Prompt Eng.**” to compose the application.
-
+
**2.1 Fill in Prompts**
diff --git a/en/user-guide/creating-dify-apps/use-cases/build-an-notion-ai-assistant.md b/en/user-guide/creating-dify-apps/use-cases/build-an-notion-ai-assistant.md
index 2aa9adb..b8138a7 100644
--- a/en/user-guide/creating-dify-apps/use-cases/build-an-notion-ai-assistant.md
+++ b/en/user-guide/creating-dify-apps/use-cases/build-an-notion-ai-assistant.md
@@ -94,7 +94,7 @@ _I want you to act as an IT Expert in my Notion workspace, using your knowledge
It's recommended to initially enable the AI to actively furnish the users with a starter sentence, providing a clue as to what they can ask. Furthermore, activating the 'Speech to Text' feature can allow users to interact with your AI assistant using their voice.
-
+
Finally, Click the "Publish" button on the top right of the page. Now you can click the public URL in the "Overview" section to converse with your personalized AI assistant!
diff --git a/en/user-guide/using-dify-apps/conversation-application.md b/en/user-guide/using-dify-apps/conversation-application.md
index 58d0840..6ab2621 100644
--- a/en/user-guide/using-dify-apps/conversation-application.md
+++ b/en/user-guide/using-dify-apps/conversation-application.md
@@ -52,4 +52,4 @@ _Please make sure that the device environment you are using is authorized to use
If the "Quotations and Attribution" feature is enabled during the application arrangement, the dialogue returns will automatically show the quoted knowledge document sources.
-
+