docs: added fetching from notion (#2670)
* Added new Docusaurus instance that fetches automatically from Notion * Add Github workflow to fetch docs from Notion * Added legacy peer deps to solve dependency problems * Fix git ignore and added pages
2
.github/workflows/deploy_gh-pages.yml
vendored
@@ -22,7 +22,7 @@ jobs:
|
||||
cache-dependency-path: ./docs/package-lock.json
|
||||
|
||||
- name: Install dependencies
|
||||
run: cd docs && npm install
|
||||
run: cd docs && npm install --legacy-peer-deps
|
||||
- name: Build website
|
||||
run: cd docs && npm run build
|
||||
|
||||
|
||||
2
.github/workflows/docs_test.yml
vendored
@@ -40,7 +40,7 @@ jobs:
|
||||
- name: Install Node.js dependencies
|
||||
run: |
|
||||
cd docs
|
||||
npm install
|
||||
npm install --legacy-peer-deps
|
||||
if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}
|
||||
|
||||
- name: Build Docs
|
||||
|
||||
77
.github/workflows/fetch_docs_notion.yml
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
name: Fetch Docs from Notion
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
NODE_VERSION: "21"
|
||||
|
||||
jobs:
|
||||
fetch-docs:
|
||||
name: Fetch Docs from Notion
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
id: setup-node
|
||||
with:
|
||||
node-version: ${{ env.NODE_VERSION }}
|
||||
|
||||
- name: Configure git
|
||||
run: |
|
||||
git config --global user.name 'github-actions[bot]'
|
||||
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
|
||||
|
||||
- name: Create new branch
|
||||
run: |
|
||||
git checkout -b update-docs-$(date +%Y%m%d%H%M%S)
|
||||
|
||||
- name: Cache Node.js dependencies
|
||||
uses: actions/cache@v4
|
||||
id: npm-cache
|
||||
with:
|
||||
path: ~/.npm
|
||||
key: ${{ runner.os }}-node-${{ hashFiles('docs/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-node-
|
||||
|
||||
- name: Install Node.js dependencies
|
||||
run: |
|
||||
cd docs
|
||||
npm install --legacy-peer-deps
|
||||
if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}
|
||||
|
||||
- name: Fetch Docs from Notion
|
||||
run: |
|
||||
cd docs
|
||||
npm run pull
|
||||
env:
|
||||
NOTION_TOKEN: ${{ secrets.NOTION_TOKEN }}
|
||||
NOTION_DOCS_ROOT_PAGE_ID: ${{ secrets.NOTION_DOCS_ROOT_PAGE_ID }}
|
||||
|
||||
- name: Commit changes
|
||||
run: |
|
||||
git add .
|
||||
git commit -m "Update docs from Notion"
|
||||
|
||||
- name: Push changes
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
git push --set-upstream origin $(git rev-parse --abbrev-ref HEAD)
|
||||
|
||||
- name: Create Pull Request
|
||||
id: create_pr
|
||||
uses: peter-evans/create-pull-request@v5
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
commit-message: Update docs from Notion
|
||||
branch: update-docs-$(date +%Y%m%d%H%M%S)
|
||||
base: main
|
||||
title: "docs: update docs from notion"
|
||||
body: This PR updates the documentation from Notion.
|
||||
labels: documentation
|
||||
23
docs/.gitignore
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# Dependencies
|
||||
/node_modules
|
||||
|
||||
# Production
|
||||
/build
|
||||
|
||||
# Generated files
|
||||
.docusaurus
|
||||
.cache-loader
|
||||
|
||||
# Misc
|
||||
.DS_Store
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
|
||||
static
|
||||
*.orig
|
||||
60
docs/css/docu-notion-styles.css
Normal file
@@ -0,0 +1,60 @@
|
||||
/* This should be added to the docusaurus.config.js in order to show some notion things correctly.
|
||||
See the option: --css-output-directory
|
||||
See the docusaurus docs: https://docusaurus.io/docs/styling-layout
|
||||
See the use in the docu-notion-sample-site: https://github.com/sillsdev/docu-notion-sample-site/blob/main/docusaurus.config.js
|
||||
*/
|
||||
|
||||
/* Copied from
|
||||
https://github1s.com/NotionX/react-notion-x/blob/master/packages/react-notion-x/src/styles.css#L934
|
||||
and
|
||||
https://github1s.com/NotionX/react-notion-x/blob/master/packages/react-notion-x/src/styles.css#L1063
|
||||
*/
|
||||
.notion-column {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
padding-top: 12px;
|
||||
padding-bottom: 12px;
|
||||
}
|
||||
|
||||
.notion-column > *:first-child {
|
||||
margin-top: 0;
|
||||
margin-left: 0;
|
||||
margin-right: 0;
|
||||
}
|
||||
|
||||
.notion-column > *:last-child {
|
||||
margin-left: 0;
|
||||
margin-right: 0;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
.notion-row {
|
||||
display: flex;
|
||||
overflow: hidden;
|
||||
width: 100%;
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
@media (max-width: 640px) {
|
||||
.notion-row {
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.notion-row .notion-column {
|
||||
width: 100% !important;
|
||||
}
|
||||
|
||||
.notion-row .notion-spacer {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
.notion-spacer {
|
||||
/* This matches the value in ColumnTransformer.ts */
|
||||
width: calc(min(32px, 4vw));
|
||||
}
|
||||
|
||||
.notion-spacer:last-child {
|
||||
display: none;
|
||||
}
|
||||
/* End copied from NotionX */
|
||||
36
docs/css/gifplayer.css
Normal file
@@ -0,0 +1,36 @@
|
||||
.gif_player {
|
||||
display: inline-block;
|
||||
position: relative;
|
||||
-webkit-user-select: none;
|
||||
-moz-user-select: none;
|
||||
-ms-user-select: none;
|
||||
user-select: none;
|
||||
-webkit-touch-callout: none;
|
||||
-webkit-tap-highlight-color: transparent; }
|
||||
.gif_player .play_button {
|
||||
background-color: rgba(0, 0, 0, 0.5);
|
||||
border: 2px dashed #fff;
|
||||
border-radius: 50%;
|
||||
box-shadow: 0 0 0 3px rgba(0, 0, 0, 0.5);
|
||||
color: #fff;
|
||||
cursor: pointer;
|
||||
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
|
||||
font-size: 24px;
|
||||
left: 50%;
|
||||
opacity: 1;
|
||||
padding: 14px 10px;
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
transform: translate(-50%, -50%) scale(1) rotate(0deg);
|
||||
transition: transform 0.4s, opacity 0.4s; }
|
||||
.gif_player .play_button:hover {
|
||||
background-color: rgba(0, 0, 0, 0.7);
|
||||
box-shadow: 0 0 0 3px rgba(0, 0, 0, 0.7); }
|
||||
.gif_player .play_button::after {
|
||||
content: "GIF"; }
|
||||
.gif_player.playing .play_button {
|
||||
transform: translate(-50%, -50%) scale(0) rotate(180deg);
|
||||
opacity: 0.5; }
|
||||
.gif_player img {
|
||||
max-width: 100%; }
|
||||
|
||||
BIN
docs/docs/Components/1028644105.png
Normal file
|
After Width: | Height: | Size: 385 KiB |
BIN
docs/docs/Components/238089171.png
Normal file
|
After Width: | Height: | Size: 113 KiB |
BIN
docs/docs/Components/241280398.png
Normal file
|
After Width: | Height: | Size: 133 KiB |
BIN
docs/docs/Components/263391508.png
Normal file
|
After Width: | Height: | Size: 185 KiB |
BIN
docs/docs/Components/565424296.png
Normal file
|
After Width: | Height: | Size: 136 KiB |
BIN
docs/docs/Components/938852908.png
Normal file
|
After Width: | Height: | Size: 130 KiB |
|
Before Width: | Height: | Size: 63 KiB After Width: | Height: | Size: 63 KiB |
1
docs/docs/Components/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":5, "label":"Components"}
|
||||
456
docs/docs/Components/components-custom-components.md
Normal file
@@ -0,0 +1,456 @@
|
||||
---
|
||||
title: Custom Components
|
||||
sidebar_position: 8
|
||||
slug: /components-custom-components
|
||||
---
|
||||
|
||||
|
||||
|
||||
Langflow components can be created from within the platform, allowing users to extend the platform's functionality using Python code. They encapsulate are designed to be independent units, reusable across different workflows.
|
||||
|
||||
|
||||
These components can be easily connected within a language model pipeline, adding freedom and flexibility to what can be included in between user and AI messages.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
Since Langflow operates with Python behind the scenes, you can implement any Python function within a Custom Component. This means you can leverage the power of libraries such as Pandas, Scikit-learn, Numpy, and thousands of packages to create components that handle data processing in unlimited ways.
|
||||
|
||||
|
||||
Custom Components are not just about extending functionality; they also streamline the development process. By creating reusable and configurable components, you can enhance the capabilities of Langflow, making it a powerful tool for developing complex workflows.
|
||||
|
||||
|
||||
### Key Characteristics: {#d3a151089a9e4584bd420461cd1432c6}
|
||||
|
||||
1. **Modular and Reusable**: Designed as independent units, components encapsulate specific functionality, making them reusable across different projects and workflows.
|
||||
2. **Integration with Python Libraries**: You can import libraries like Pandas, Scikit-learn, Numpy, etc., to build components that handle data processing, machine learning, numerical computations, and more.
|
||||
3. **Flexible Inputs and Outputs**: While Langflow offers native input and output types, you can use any type as long as they are properly annotated in the output methods (e.g., `> list[int]`).
|
||||
4. **Python-Powered**: Since Langflow operates with Python behind the scenes, any Python function can be implemented within a custom component.
|
||||
5. **Enhanced Workflow**: Custom components serve as reusable building blocks, enabling you to create pre-processing visual blocks with ease and integrate them into your language model pipeline.
|
||||
|
||||
### Why Use Custom Components? {#827a2b5acec94426a4a2106a8332622d}
|
||||
|
||||
- **Customization**: Tailor the functionality to your specific needs by writing Python code that suits your workflow.
|
||||
- **Flexibility**: Add any Python-based logic or processing step between user/AI messages, enhancing the flexibility of Langflow.
|
||||
- **Efficiency**: Streamline your development process by creating reusable, configurable components that can be easily deployed.
|
||||
|
||||
### How to Write Them {#2088ade519514bb3923cdf7f2ac2089a}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Writing custom components in Langflow involves creating a Python class that defines the component's functionality, inputs, and outputs. The process involves a few key steps:
|
||||
|
||||
1. **Define the Class**: Start by defining a Python class that inherits from `Component`. This class will encapsulate the functionality of your custom component.
|
||||
2. **Specify Inputs and Outputs**: Use Langflow's input and output classes to define the inputs and outputs of your component. They should be declared as class attributes.
|
||||
3. **Implement Output Methods**: Implement methods for each output, which contains the logic of your component. These methods can access input values using `self.<input_name>` , return processed values and define what to be displayed in the component with the `self.status` attribute.
|
||||
4. **Use Proper Annotations**: Ensure that output methods are properly annotated with their types. Langflow uses these annotations to validate and handle data correctly.
|
||||
|
||||
Here's a basic structure of a custom component:
|
||||
|
||||
|
||||
```python
|
||||
from langflow.custom import Component
|
||||
from langflow.inputs import StrInput, IntInput
|
||||
from langflow.template import Output
|
||||
|
||||
class MyCustomComponent(Component):
|
||||
icon = "coffee" # check lucide.dev/icons or pass an emoji
|
||||
|
||||
inputs = [
|
||||
StrInput(
|
||||
name="input_text",
|
||||
display_name="Input Text",
|
||||
info="Text to be processed.",
|
||||
),
|
||||
IntInput(
|
||||
name="input_number",
|
||||
display_name="Input Number",
|
||||
info="Number to be processed.",
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
Output(display_name="Processed Text", name="processed_text", method="process_text"),
|
||||
]
|
||||
|
||||
def process_text(self) -> str:
|
||||
input_text = self.input_text
|
||||
input_number = self.input_number
|
||||
# Implement your logic here
|
||||
processed_text = f"{input_text} processed with number {input_number}"
|
||||
self.status = processed_text
|
||||
return processed_text
|
||||
|
||||
|
||||
```
|
||||
|
||||
|
||||
Paste that code into the Custom Component code snippet and click **Check & Save.**
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
You should see something like the component below. Double click the name or description areas to edit them.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
## Input Types {#3815589831f24ab792328ed233c8b00d}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Langflow provides several higher-level input types to simplify the creation of custom components. These input types standardize how inputs are defined, validated, and used. Here’s a guide on how to use these inputs and their primary purposes:
|
||||
|
||||
|
||||
### **HandleInput** {#fb06c48a326043ffa46badc1ab3ba467}
|
||||
|
||||
|
||||
Represents an input that has a handle to a specific type (e.g., `BaseLanguageModel`, `BaseRetriever`, etc.).
|
||||
|
||||
- **Usage:** Useful for connecting to specific component types in a flow.
|
||||
|
||||
### **DataInput** {#0e1dcb768e38487180d720b0884a90f5}
|
||||
|
||||
|
||||
Represents an input that receives a `Data` object.
|
||||
|
||||
- **Usage:** Ideal for components that process or manipulate data objects.
|
||||
- **Input Types:** `["Data"]`
|
||||
|
||||
### **StrInput** {#4ec6e68ad9ab4cd194e8e607bc5b3411}
|
||||
|
||||
|
||||
Represents a standard string input field.
|
||||
|
||||
- **Usage:** Used for any text input where the user needs to provide a string.
|
||||
- **Input Types:** `["Text"]`
|
||||
|
||||
### **MessageInput** {#9292ac0105e14177af5eff2131b9c71b}
|
||||
|
||||
|
||||
Represents an input field specifically for `Message` objects.
|
||||
|
||||
- **Usage:** Used in components that handle or process messages.
|
||||
- **Input Types:** `["Message"]`
|
||||
|
||||
### **MessageTextInput** {#5511f5e32b944b4e973379a6bd5405e4}
|
||||
|
||||
|
||||
Represents a text input for messages.
|
||||
|
||||
- **Usage:** Suitable for components that need to extract text from message objects.
|
||||
- **Input Types:** `["Message"]`
|
||||
|
||||
### **MultilineInput** {#e6d8315b0fb44a2fb8c62c3f3184bbe9}
|
||||
|
||||
|
||||
Represents a text field that supports multiple lines.
|
||||
|
||||
- **Usage:** Ideal for longer text inputs where the user might need to write extended text.
|
||||
- **Input Types:** `["Text"]`
|
||||
- **Attributes:** `multiline=True`
|
||||
|
||||
### **SecretStrInput** {#2283c13aa5f745b8b0009f7d40e59419}
|
||||
|
||||
|
||||
Represents a password input field.
|
||||
|
||||
- **Usage:** Used for sensitive text inputs where the input should be hidden (e.g., passwords, API keys).
|
||||
- **Attributes:** `password=True`
|
||||
- **Input Types:** Does not accept input types, meaning it has no input handles for previous nodes/components to connect to it.
|
||||
|
||||
### **IntInput** {#612680db6578451daef695bd19827a56}
|
||||
|
||||
|
||||
Represents an integer input field.
|
||||
|
||||
- **Usage:** Used for numeric inputs where the value should be an integer.
|
||||
- **Input Types:** `["Integer"]`
|
||||
|
||||
### **FloatInput** {#a15e1fdae15b49fc9bfbf38f8bd7b203}
|
||||
|
||||
|
||||
Represents a float input field.
|
||||
|
||||
- **Usage:** Used for numeric inputs where the value should be a floating-point number.
|
||||
- **Input Types:** `["Float"]`
|
||||
|
||||
### **BoolInput** {#3083671e0e7f4390a03396485114be66}
|
||||
|
||||
|
||||
Represents a boolean input field.
|
||||
|
||||
- **Usage:** Used for true/false or yes/no type inputs.
|
||||
- **Input Types:** `["Boolean"]`
|
||||
|
||||
### **NestedDictInput** {#2866fc4018e743d8a45afde53f1e57be}
|
||||
|
||||
|
||||
Represents an input field for nested dictionaries.
|
||||
|
||||
- **Usage:** Used for more complex data structures where the input needs to be a dictionary.
|
||||
- **Input Types:** `["NestedDict"]`
|
||||
|
||||
### **DictInput** {#daa2c2398f694ec199b425e2ed4bcf93}
|
||||
|
||||
|
||||
Represents an input field for dictionaries.
|
||||
|
||||
- **Usage:** Suitable for inputs that require a dictionary format.
|
||||
- **Input Types:** `["Dict"]`
|
||||
|
||||
### **DropdownInput** {#14dcdef11bab4d3f8127eaf2e36a77b9}
|
||||
|
||||
|
||||
Represents a dropdown input field.
|
||||
|
||||
- **Usage:** Used where the user needs to select from a predefined list of options.
|
||||
- **Attributes:** `options` to define the list of selectable options.
|
||||
- **Input Types:** `["Text"]`
|
||||
|
||||
### **FileInput** {#73e6377dc5f446f39517a558a1291410}
|
||||
|
||||
|
||||
Represents a file input field.
|
||||
|
||||
- **Usage:** Used to upload files.
|
||||
- **Attributes:** `file_types` to specify the types of files that can be uploaded.
|
||||
- **Input Types:** `["File"]`
|
||||
|
||||
Here is an example of how these inputs can be defined in a custom component:
|
||||
|
||||
|
||||
```python
|
||||
from langflow.custom import Component
|
||||
from langflow.inputs import StrInput, MultilineInput, SecretStrInput, IntInput, DropdownInput
|
||||
from langflow.template import Output, Input
|
||||
|
||||
class MyCustomComponent(Component):
|
||||
display_name = "My Custom Component"
|
||||
description = "An example of a custom component with various input types."
|
||||
|
||||
inputs = [
|
||||
StrInput(
|
||||
name="username",
|
||||
display_name="Username",
|
||||
info="Enter your username."
|
||||
),
|
||||
SecretStrInput(
|
||||
name="password",
|
||||
display_name="Password",
|
||||
info="Enter your password."
|
||||
),
|
||||
MultilineInput(
|
||||
name="description",
|
||||
display_name="Description",
|
||||
info="Enter a detailed description.",
|
||||
),
|
||||
IntInput(
|
||||
name="age",
|
||||
display_name="Age",
|
||||
info="Enter your age."
|
||||
),
|
||||
DropdownInput(
|
||||
name="gender",
|
||||
display_name="Gender",
|
||||
options=["Male", "Female", "Other"],
|
||||
info="Select your gender."
|
||||
)
|
||||
]
|
||||
|
||||
outputs = [
|
||||
Output(display_name="Result", name="result", method="process_inputs"),
|
||||
]
|
||||
|
||||
def process_inputs(self):
|
||||
# Your processing logic here
|
||||
return "Processed"
|
||||
```
|
||||
|
||||
|
||||
By defining inputs this way, Langflow can automatically handle the validation and display of these fields in the user interface, making it easier to create robust and user-friendly custom components.
|
||||
|
||||
|
||||
All of the types detailed above derive from a general class that can also be accessed through the generic `Input` class.
|
||||
|
||||
|
||||
### Generic Input {#278e2027493e45b68746af0a5b6c06f6}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Langflow offers native input types, but you can use any type as long as they are properly annotated in the output methods (e.g., `-> list[int]`).
|
||||
|
||||
|
||||
The `Input` class is highly customizable, allowing you to specify a wide range of attributes for each input field. It has several attributes that can be customized:
|
||||
|
||||
- `field_type`: Specifies the type of field (e.g., `str`, `int`). Default is `str`.
|
||||
- `required`: Boolean indicating if the field is required. Default is `False`.
|
||||
- `placeholder`: Placeholder text for the input field. Default is an empty string.
|
||||
- `is_list`: Boolean indicating if the field should accept a list of values. Default is `False`.
|
||||
- `show`: Boolean indicating if the field should be shown. Default is `True`.
|
||||
- `multiline`: Boolean indicating if the field should allow multi-line input. Default is `False`.
|
||||
- `value`: Default value for the input field. Default is `None`.
|
||||
- `file_types`: List of accepted file types (for file inputs). Default is an empty list.
|
||||
- `file_path`: File path if the field is a file input. Default is `None`.
|
||||
- `password`: Boolean indicating if the field is a password. Default is `False`.
|
||||
- `options`: List of options for the field (for dropdowns). Default is `None`.
|
||||
- `name`: Name of the input field. Default is `None`.
|
||||
- `display_name`: Display name for the input field. Default is `None`.
|
||||
- `advanced`: Boolean indicating if the field is an advanced parameter. Default is `False`.
|
||||
- `input_types`: List of accepted input types. Default is `None`.
|
||||
- `dynamic`: Boolean indicating if the field is dynamic. Default is `False`.
|
||||
- `info`: Additional information or tooltip for the input field. Default is an empty string.
|
||||
- `real_time_refresh`: Boolean indicating if the field should refresh in real-time. Default is `None`.
|
||||
- `refresh_button`: Boolean indicating if the field should have a refresh button. Default is `None`.
|
||||
- `refresh_button_text`: Text for the refresh button. Default is `None`.
|
||||
- `range_spec`: Range specification for numeric fields. Default is `None`.
|
||||
- `load_from_db`: Boolean indicating if the field should load from the database. Default is `False`.
|
||||
- `title_case`: Boolean indicating if the display name should be in title case. Default is `True`.
|
||||
|
||||
Below is an example of how to define inputs for a component using the `Input` class:
|
||||
|
||||
|
||||
```python
|
||||
from langflow.template import Input, Output
|
||||
from langflow.custom import Component
|
||||
from langflow.field_typing import Text
|
||||
|
||||
class ExampleComponent(Component):
|
||||
display_name = "Example Component"
|
||||
description = "An example component demonstrating input fields."
|
||||
|
||||
inputs = [
|
||||
Input(
|
||||
name="input_text",
|
||||
display_name="Input Text",
|
||||
field_type="str",
|
||||
required=True,
|
||||
placeholder="Enter some text",
|
||||
multiline=True,
|
||||
info="This is a required text input.",
|
||||
input_types=["Text"]
|
||||
),
|
||||
Input(
|
||||
name="max_length",
|
||||
display_name="Max Length",
|
||||
field_type="int",
|
||||
required=False,
|
||||
placeholder="Maximum length",
|
||||
info="Enter the maximum length of the text.",
|
||||
range_spec={"min": 0, "max": 1000},
|
||||
),
|
||||
Input(
|
||||
name="options",
|
||||
display_name="Options",
|
||||
field_type="str",
|
||||
is_list=True,
|
||||
options=["Option 1", "Option 2", "Option 3"],
|
||||
info="Select one or more options."
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
Output(display_name="Result", name="result", method="process_input"),
|
||||
]
|
||||
|
||||
def process_input(self) -> Text:
|
||||
# Process the inputs and generate output
|
||||
return Text(value=f"Processed: {self.input_text}, Max Length: {self.max_length}, Options: {self.options}")
|
||||
|
||||
# Define how to use the inputs and outputs
|
||||
component = ExampleComponent()
|
||||
|
||||
|
||||
```
|
||||
|
||||
|
||||
In this example:
|
||||
|
||||
- The `input_text` input is a required multi-line text field.
|
||||
- The `max_length` input is an optional integer field with a range specification.
|
||||
- The `options` input is a list of strings with predefined options.
|
||||
|
||||
These attributes allow for a high degree of customization, making it easy to create input fields that suit the needs of your specific component.
|
||||
|
||||
|
||||
### Multiple Outputs {#6f225be8a142450aa19ee8e46a3b3c8c}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
In Langflow, custom components can have multiple outputs. Each output can be associated with a specific method in the component, allowing you to define distinct behaviors for each output path. This feature is particularly useful when you want to route data based on certain conditions or process it in multiple ways.
|
||||
|
||||
1. **Definition of Outputs**: Each output is defined in the `outputs` list of the component. Each output is associated with a display name, an internal name, and a method that gets called to generate the output.
|
||||
2. **Output Methods**: The methods associated with outputs are responsible for generating the data for that particular output. These methods are called when the component is executed, and each method can independently produce its result.
|
||||
|
||||
Below is an example of a component with two outputs:
|
||||
|
||||
- `process_data`: Processes the input text (e.g., converts it to uppercase) and returns it.
|
||||
- `get_processing_function`: Returns the `process_data` method itself to be reused in composition.
|
||||
|
||||
```python
|
||||
from typing import Callable
|
||||
from langflow.custom import Component
|
||||
from langflow.inputs import StrInput
|
||||
from langflow.template import Output
|
||||
from langflow.field_typing import Text
|
||||
|
||||
class DualOutputComponent(Component):
|
||||
display_name = "Dual Output"
|
||||
description = "Processes input text and returns both the result and the processing function."
|
||||
icon = "double-arrow"
|
||||
|
||||
inputs = [
|
||||
StrInput(
|
||||
name="input_text",
|
||||
display_name="Input Text",
|
||||
info="The text input to be processed.",
|
||||
),
|
||||
]
|
||||
|
||||
outputs = [
|
||||
Output(display_name="Processed Data", name="processed_data", method="process_data"),
|
||||
Output(display_name="Processing Function", name="processing_function", method="get_processing_function"),
|
||||
]
|
||||
|
||||
def process_data(self) -> Text:
|
||||
# Process the input text (e.g., convert to uppercase)
|
||||
processed = self.input_text.upper()
|
||||
self.status = processed
|
||||
return processed
|
||||
|
||||
def get_processing_function(self) -> Callable[[], Text]:
|
||||
# Return the processing function itself
|
||||
return self.process_data
|
||||
```
|
||||
|
||||
|
||||
This example shows how to define multiple outputs in a custom component. The first output returns the processed data, while the second output returns the processing function itself.
|
||||
|
||||
|
||||
The `processing_function` output can be used in scenarios where the function itself is needed for further processing or dynamic flow control. Notice how both outputs are properly annotated with their respective types, ensuring clarity and type safety.
|
||||
|
||||
|
||||
## Special Operations {#b1ef2d18e2694b93927ae9403d24b96b}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Advanced methods and attributes offer additional control and functionality. Understanding how to leverage these can enhance your custom components' capabilities.
|
||||
|
||||
- `self.inputs`: Access all defined inputs. Useful when an output method needs to interact with multiple inputs.
|
||||
- `self.outputs`: Access all defined outputs. This is particularly useful if an output function needs to trigger another output function.
|
||||
- `self.status`: Use this to update the component's status or intermediate results. It helps track the component's internal state or store temporary data.
|
||||
- `self.graph.flow_id`: Retrieve the flow ID, useful for maintaining context or debugging.
|
||||
- `self.stop("output_name")`: Use this method within an output function to prevent data from being sent through other components. This method stops next component execution and is particularly useful for specific operations where a component should stop from running based on specific conditions.
|
||||
89
docs/docs/Components/components-data.md
Normal file
@@ -0,0 +1,89 @@
|
||||
---
|
||||
title: Data
|
||||
sidebar_position: 3
|
||||
slug: /components-data
|
||||
---
|
||||
|
||||
|
||||
|
||||
## API Request {#23da589293f74016a1f70d6d7c0fdc55}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
This component sends HTTP requests to the specified URLs.
|
||||
|
||||
|
||||
Use this component to interact with external APIs or services and retrieve data. Ensure that the URLs are valid and that you configure the method, headers, body, and timeout correctly.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **URLs:** The URLs to target.
|
||||
- **Method:** The HTTP method, such as GET or POST.
|
||||
- **Headers:** The headers to include with the request.
|
||||
- **Body:** The data to send with the request (for methods like POST, PATCH, PUT).
|
||||
- **Timeout:** The maximum time to wait for a response.
|
||||
|
||||
## Directory {#4fe56acaaac847029ace173dc793f8f4}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
This component recursively retrieves files from a specified directory.
|
||||
|
||||
|
||||
Use this component to retrieve various file types, such as text or JSON files, from a directory. Make sure to provide the correct path and configure the other parameters as needed.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Path:** The directory path.
|
||||
- **Types:** The types of files to retrieve. Leave this blank to retrieve all file types.
|
||||
- **Depth:** The level of directory depth to search.
|
||||
- **Max Concurrency:** The maximum number of simultaneous file loading operations.
|
||||
- **Load Hidden:** Set to true to include hidden files.
|
||||
- **Recursive:** Set to true to enable recursive search.
|
||||
- **Silent Errors:** Set to true to suppress exceptions on errors.
|
||||
- **Use Multithreading:** Set to true to use multithreading in file loading.
|
||||
|
||||
## File {#d5d4bb78ce0a473d8a3b6a296d3e8383}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
This component loads a file.
|
||||
|
||||
|
||||
Use this component to load files, such as text or JSON files. Ensure you specify the correct path and configure other parameters as necessary.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Path:** The file path.
|
||||
- **Silent Errors:** Set to true to prevent exceptions on errors.
|
||||
|
||||
## URL {#1cc513827a0942d6885b3a9168eabc97}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
This component retrieves content from specified URLs.
|
||||
|
||||
|
||||
Ensure the URLs are valid and adjust other parameters as needed. **Parameters:**
|
||||
|
||||
- **URLs:** The URLs to retrieve content from.
|
||||
|
||||
## Create Data {#aac4cad0cd38426191c2e7516285877b}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
This component allows you to create a `Data` from a number of inputs. You can add as many key-value pairs as you want (as long as it is less than 15). Once you've picked that number you'll need to write the name of the Key and can pass `Text` values from other components to it.
|
||||
|
||||
@@ -1,37 +1,45 @@
|
||||
import Admonition from "@theme/Admonition";
|
||||
---
|
||||
title: Embedding Models
|
||||
sidebar_position: 6
|
||||
slug: /components-embedding-models
|
||||
---
|
||||
|
||||
# Embeddings
|
||||
|
||||
<Admonition type="warning" title="warning">
|
||||
This page may contain outdated information. It will be updated as soon as possible.
|
||||
</Admonition>
|
||||
|
||||
## Amazon Bedrock Embeddings
|
||||
## Amazon Bedrock Embeddings {#4ddcfde8c1664e358d3f16d718e944d8}
|
||||
|
||||
|
||||
Used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/).
|
||||
|
||||
Used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/).
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
| -------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
|
||||
| `credentials_profile_name` | `str` | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information. | |
|
||||
| `model_id` | `str` | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API. | |
|
||||
| `model_id` | `str` | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API. | |
|
||||
| `endpoint_url` | `str` | URL to set a specific service endpoint other than the default AWS endpoint. | |
|
||||
| `region_name` | `str` | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided. | |
|
||||
| `region_name` | `str` | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided. | |
|
||||
|
||||
## Astra vectorize
|
||||
|
||||
Used to generate server-side embeddings using [DataStax Astra](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html).
|
||||
## Astra vectorize {#c1e6d1373824424ea130e052ba0f46af}
|
||||
|
||||
|
||||
Used to generate server-side embeddings using [DataStax Astra](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html).
|
||||
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
|--------------------|----------|-----------------------------------------------------------------------------------------------------------------------|-------------|
|
||||
| ------------------ | -------- | --------------------------------------------------------------------------------------------------------------------- | ----------- |
|
||||
| `provider` | `str` | The embedding provider to use. | |
|
||||
| `model_name` | `str` | The embedding model to use. | |
|
||||
| `authentication` | `dict` | Authentication parameters. Use the Astra Portal to add the embedding provider integration to your Astra organization. | |
|
||||
| `provider_api_key` | `str` | An alternative to Astra authentication that lets you directly use the provider's API key. | |
|
||||
| `model_parameters` | `dict` | Additional model parameters. | |
|
||||
|
||||
## Cohere Embeddings
|
||||
|
||||
Used to load embedding models from [Cohere](https://cohere.com/).
|
||||
## Cohere Embeddings {#0c5b7b8790da448fabd4c5ddba1fcbde}
|
||||
|
||||
|
||||
Used to load embedding models from [Cohere](https://cohere.com/).
|
||||
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
| ---------------- | -------- | ------------------------------------------------------------------------- | -------------------- |
|
||||
@@ -39,21 +47,27 @@ Used to load embedding models from [Cohere](https://cohere.com/).
|
||||
| `model` | `str` | Language model used for embedding text documents and performing queries. | `embed-english-v2.0` |
|
||||
| `truncate` | `bool` | Whether to truncate the input text to fit within the model's constraints. | `False` |
|
||||
|
||||
## Azure OpenAI Embeddings
|
||||
|
||||
## Azure OpenAI Embeddings {#8ffb790d5a6c484dab3fe6c777638a44}
|
||||
|
||||
|
||||
Generate embeddings using Azure OpenAI models.
|
||||
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
| ----------------- | -------- | -------------------------------------------------------------------------------------------------- | ----------- |
|
||||
| `Azure Endpoint` | `str` | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | |
|
||||
| `Azure Endpoint` | `str` | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` | |
|
||||
| `Deployment Name` | `str` | The name of the deployment. | |
|
||||
| `API Version` | `str` | The API version to use, options include various dates. | |
|
||||
| `API Key` | `str` | The API key to access the Azure OpenAI service. | |
|
||||
|
||||
## Hugging Face API Embeddings
|
||||
|
||||
## Hugging Face API Embeddings {#8536e4ee907b48688e603ae9bf7822cb}
|
||||
|
||||
|
||||
Generate embeddings using Hugging Face Inference API models.
|
||||
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
| --------------- | -------- | ----------------------------------------------------- | ------------------------ |
|
||||
| `API Key` | `str` | API key for accessing the Hugging Face Inference API. | |
|
||||
@@ -64,9 +78,12 @@ Generate embeddings using Hugging Face Inference API models.
|
||||
| `Model Kwargs` | `dict` | Additional arguments for the model. | |
|
||||
| `Multi Process` | `bool` | Whether to use multiple processes. | `False` |
|
||||
|
||||
## Hugging Face Embeddings
|
||||
|
||||
Used to load embedding models from [HuggingFace](https://huggingface.co).
|
||||
## Hugging Face Embeddings {#b2b74732874743d3be6fdf8aae049e74}
|
||||
|
||||
|
||||
Used to load embedding models from [HuggingFace](https://huggingface.co/).
|
||||
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
| --------------- | -------- | ---------------------------------------------- | ----------------------------------------- |
|
||||
@@ -76,9 +93,12 @@ Used to load embedding models from [HuggingFace](https://huggingface.co).
|
||||
| `Model Name` | `str` | Name of the HuggingFace model to use. | `sentence-transformers/all-mpnet-base-v2` |
|
||||
| `Multi Process` | `bool` | Whether to use multiple processes. | `False` |
|
||||
|
||||
## OpenAI Embeddings
|
||||
|
||||
Used to load embedding models from [OpenAI](https://openai.com/).
|
||||
## OpenAI Embeddings {#af7630df05a245d1a632e1bf6db2a4c5}
|
||||
|
||||
|
||||
Used to load embedding models from [OpenAI](https://openai.com/).
|
||||
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
| -------------------------- | ---------------- | ------------------------------------------------ | ------------------------ |
|
||||
@@ -105,19 +125,25 @@ Used to load embedding models from [OpenAI](https://openai.com/).
|
||||
| `TikToken Enable` | `bool` | Whether to enable TikToken. | `True` |
|
||||
| `TikToken Model Name` | `str` | Name of the TikToken model. | |
|
||||
|
||||
## Ollama Embeddings
|
||||
|
||||
## Ollama Embeddings {#a26d2cb92e6d44669c2cfff71a5e9431}
|
||||
|
||||
|
||||
Generate embeddings using Ollama models.
|
||||
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
| ------------------- | -------- | ---------------------------------------------------------------------------------------- | ------------------------ |
|
||||
| `Ollama Model` | `str` | Name of the Ollama model to use. | `llama2` |
|
||||
| `Ollama Base URL` | `str` | Base URL of the Ollama API. | `http://localhost:11434` |
|
||||
| `Model Temperature` | `float` | Temperature parameter for the model. Adjusts the randomness in the generated embeddings. | |
|
||||
|
||||
## VertexAI Embeddings
|
||||
|
||||
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).
|
||||
## VertexAI Embeddings {#707b38c23cb9413fbbaab1ae7b872311}
|
||||
|
||||
|
||||
Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).
|
||||
|
||||
|
||||
| **Parameter** | **Type** | **Description** | **Default** |
|
||||
| --------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------- |
|
||||
@@ -128,7 +154,11 @@ Wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embedding
|
||||
| `project` | `str` | The default GCP project to use when making Vertex API calls. | |
|
||||
| `request_parallelism` | `int` | The amount of parallelism allowed for requests issued to VertexAI models. | `5` |
|
||||
| `temperature` | `float` | Tunes the degree of randomness in text generations. Should be a non-negative value. | `0` |
|
||||
| `top_k` | `int` | How the model selects tokens for output, the next token is selected from the top `k` tokens. | `40` |
|
||||
| `top_p` | `float` | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. | `0.95` |
|
||||
| `tuned_model_name` | `str` | The name of a tuned model. If provided, `model_name` is ignored. | |
|
||||
| `verbose` | `bool` | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug. | `False` |
|
||||
| `top_k` | `int` | How the model selects tokens for output, the next token is selected from the top `k` tokens. | `40` |
|
||||
| `top_p` | `float` | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value. | `0.95` |
|
||||
| `tuned_model_name` | `str` | The name of a tuned model. If provided, `model_name` is ignored. | |
|
||||
| `verbose` | `bool` | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug. | `False` |
|
||||
|
||||
|
||||
[Previous Vector Stores](/components-vector-stores)
|
||||
|
||||
161
docs/docs/Components/components-helpers.md
Normal file
@@ -0,0 +1,161 @@
|
||||
---
|
||||
title: Helpers
|
||||
sidebar_position: 4
|
||||
slug: /components-helpers
|
||||
---
|
||||
|
||||
|
||||
|
||||
## Chat memory {#304dc4a3bea74efb9068093ff18a56ad}
|
||||
|
||||
|
||||
This component retrieves stored chat messages based on a specific session ID.
|
||||
|
||||
|
||||
### Parameters {#e0af57d97f844ce99789958161d19767}
|
||||
|
||||
- **Sender type:** Choose the sender type from options like "Machine", "User", or "Both".
|
||||
- **Sender name:** (Optional) The name of the sender.
|
||||
- **Number of messages:** Number of messages to retrieve.
|
||||
- **Session ID:** The session ID of the chat history.
|
||||
- **Order:** Choose the message order, either "Ascending" or "Descending".
|
||||
- **Data template:** (Optional) Template to convert a record to text. If left empty, the system dynamically sets it to the record's text key.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Combine text {#13443183e6054d0694d65f8df08833d5}
|
||||
|
||||
|
||||
This component concatenates two text sources into a single text chunk using a specified delimiter.
|
||||
|
||||
|
||||
### Parameters {#246676d119604fc5bf1be85fe93044aa}
|
||||
|
||||
- **First text:** The first text input to concatenate.
|
||||
- **Second text:** The second text input to concatenate.
|
||||
- **Delimiter:** A string used to separate the two text inputs. Defaults to a space.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Create record {#506f43345854473b8199631bf68a3b4a}
|
||||
|
||||
|
||||
This component dynamically creates a record with a specified number of fields.
|
||||
|
||||
|
||||
### Parameters {#08735e90bd10406695771bad8a95976a}
|
||||
|
||||
- **Number of fields:** Number of fields to be added to the record.
|
||||
- **Text key:** Key used as text.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Custom component {#cda421d4bccb4e7db2e48615884ed753}
|
||||
|
||||
|
||||
Use this component as a template to create your custom component.
|
||||
|
||||
|
||||
### Parameters {#04f9eb5e6da4431593a5bee8831f2327}
|
||||
|
||||
- **Parameter:** Describe the purpose of this parameter.
|
||||
|
||||
INFO
|
||||
|
||||
|
||||
Customize the `build_config` and `build` methods according to your requirements.
|
||||
|
||||
|
||||
Learn more about creating custom components at [Custom Component](http://docs.langflow.org/components/custom).
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Documents to Data {#53a6a99a54f0435e9209169cf7730c55}
|
||||
|
||||
|
||||
Convert LangChain documents into Data.
|
||||
|
||||
|
||||
### Parameters {#0eb5fce528774c2db4a3677973e75cf8}
|
||||
|
||||
- **Documents:** Documents to be converted into Data.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### ID generator {#4a8fbfb95ebe44ee8718725546db5393}
|
||||
|
||||
|
||||
Generates a unique ID.
|
||||
|
||||
|
||||
### Parameters {#4629dd15594c47399c97d9511060e114}
|
||||
|
||||
- **Value:** Unique ID generated.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Message history {#6a1a60688641490197c6443df573960e}
|
||||
|
||||
|
||||
Retrieves stored chat messages based on a specific session ID.
|
||||
|
||||
|
||||
### Parameters {#31c7fc2a3e8c4f7c89f923e700f4ea34}
|
||||
|
||||
- **Sender type:** Options for the sender type.
|
||||
- **Sender name:** Sender name.
|
||||
- **Number of messages:** Number of messages to retrieve.
|
||||
- **Session ID:** Session ID of the chat history.
|
||||
- **Order:** Order of the messages.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Data to text {#f60ab5bbc0db4b27b427897eba97fe29}
|
||||
|
||||
|
||||
Convert Data into plain text following a specified template.
|
||||
|
||||
|
||||
### Parameters {#01b91376569149a49cfcfd9321323688}
|
||||
|
||||
- **Data:** The Data to convert to text.
|
||||
- **Template:** The template used for formatting the Data. It can contain keys like `{text}`, `{data}`, or any other key in the record.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Split text {#210be0ae518d411695d6caafdd7700eb}
|
||||
|
||||
|
||||
Split text into chunks of a specified length.
|
||||
|
||||
|
||||
### Parameters {#04197fcd05e64e10b189de1171a32682}
|
||||
|
||||
- **Texts:** Texts to split.
|
||||
- **Separators:** Characters to split on. Defaults to a space.
|
||||
- **Max chunk size:** The maximum length (in characters) of each chunk.
|
||||
- **Chunk overlap:** The amount of character overlap between chunks.
|
||||
- **Recursive:** Whether to split recursively.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Update record {#d3b6116dfd8d4af080ad01bc8fd2b0b3}
|
||||
|
||||
|
||||
Update a record with text-based key/value pairs, similar to updating a Python dictionary.
|
||||
|
||||
|
||||
### Parameters {#c830224edc1d486aaaa5e2889f4f6689}
|
||||
|
||||
- **Data:** The record to update.
|
||||
- **New data:** The new data to update the record with.
|
||||
135
docs/docs/Components/components-io.md
Normal file
@@ -0,0 +1,135 @@
|
||||
---
|
||||
title: Inputs & Outputs
|
||||
sidebar_position: 1
|
||||
slug: /components-io
|
||||
---
|
||||
|
||||
|
||||
|
||||
Inputs and Outputs are a category of components that are used to define where data comes in and out of your flow. They also dynamically change the Playground and can be renamed to facilitate building and maintaining your flows.
|
||||
|
||||
|
||||
## Inputs {#6b1421ec66994d5ebe9fcce000829328}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Inputs are components used to define where data enters your flow. They can receive data from the user, a database, or any other source that can be converted to Text or Data.
|
||||
|
||||
|
||||
The difference between Chat Input and other Input components is the output format, the number of configurable fields, and the way they are displayed in the Playground.
|
||||
|
||||
|
||||
Chat Input components can output `Text` or `Data`. When you want to pass the sender name or sender to the next component, use the `Data` output. To pass only the message, use the `Text` output, useful when saving the message to a database or memory system like Zep.
|
||||
|
||||
|
||||
You can find out more about Chat Input and other Inputs [here](/components-io).
|
||||
|
||||
|
||||
### Chat Input {#2a5f02262f364f8fb75bcfa246e7bb26}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
This component collects user input from the chat.
|
||||
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Sender Type:** Specifies the sender type. Defaults to `User`. Options are `Machine` and `User`.
|
||||
- **Sender Name:** Specifies the name of the sender. Defaults to `User`.
|
||||
- **Message:** Specifies the message text. It is a multiline text input.
|
||||
- **Session ID:** Specifies the session ID of the chat history. If provided, the message will be saved in the Message History.
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
If `As Data` is `true` and the `Message` is a `Data`, the data of the `Data` will be updated with the `Sender`, `Sender Name`, and `Session ID`.
|
||||
|
||||
|
||||
One significant capability of the Chat Input component is its ability to transform the Playground into a chat window. This feature is particularly valuable for scenarios requiring user input to initiate or influence the flow.
|
||||
|
||||
|
||||
### Text Input {#260aef3726834896b496b56cdefb6d4a}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
The **Text Input** component adds an **Input** field on the Playground. This enables you to define parameters while running and testing your flow.
|
||||
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Value:** Specifies the text input value. This is where the user inputs text data that will be passed to the next component in the sequence. If no value is provided, it defaults to an empty string.
|
||||
- **Data Template:** Specifies how a `Data` should be converted into `Text`.
|
||||
|
||||
The **Data Template** field is used to specify how a `Data` should be converted into `Text`. This is particularly useful when you want to extract specific information from a `Data` and pass it as text to the next component in the sequence.
|
||||
|
||||
|
||||
For example, if you have a `Data` with the following structure:
|
||||
|
||||
|
||||
`{ "name": "John Doe", "age": 30, "email": "johndoe@email.com"}`
|
||||
|
||||
|
||||
A template with `Name: {name}, Age: {age}` will convert the `Data` into a text string of `Name: John Doe, Age: 30`.
|
||||
|
||||
|
||||
If you pass more than one `Data`, the text will be concatenated with a new line separator.
|
||||
|
||||
|
||||
## Outputs {#f62c5ad37a6f45a39b463c9b35ce7842}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Outputs are components that are used to define where data comes out of your flow. They can be used to send data to the user, to the Playground, or to define how the data will be displayed in the Playground.
|
||||
|
||||
|
||||
The Chat Output works similarly to the Chat Input but does not have a field that allows for written input. It is used as an Output definition and can be used to send data to the user.
|
||||
|
||||
|
||||
You can find out more about it and the other Outputs [here](/components-io).
|
||||
|
||||
|
||||
### Chat Output {#1edd49b72781432ea29d70acbda4e7e7}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
This component sends a message to the chat.
|
||||
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Sender Type:** Specifies the sender type. Default is `"Machine"`. Options are `"Machine"` and `"User"`.
|
||||
- **Sender Name:** Specifies the sender's name. Default is `"AI"`.
|
||||
- **Session ID:** Specifies the session ID of the chat history. If provided, messages are saved in the Message History.
|
||||
- **Message:** Specifies the text of the message.
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
If `As Data` is `true` and the `Message` is a `Data`, the data in the `Data` is updated with the `Sender`, `Sender Name`, and `Session ID`.
|
||||
|
||||
|
||||
### Text Output {#b607000bc0c5402db0433c1a7d734d01}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
This component displays text data to the user. It is useful when you want to show text without sending it to the chat.
|
||||
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Value:** Specifies the text data to be displayed. Defaults to an empty string.
|
||||
|
||||
The `TextOutput` component provides a simple way to display text data. It allows textual data to be visible in the chat window during your interaction flow.
|
||||
|
||||
309
docs/docs/Components/components-models.md
Normal file
@@ -0,0 +1,309 @@
|
||||
---
|
||||
title: Models
|
||||
sidebar_position: 5
|
||||
slug: /components-models
|
||||
---
|
||||
|
||||
|
||||
|
||||
## Amazon Bedrock {#3b8ceacef3424234814f95895a25bf43}
|
||||
|
||||
|
||||
This component facilitates the generation of text using the LLM (Large Language Model) model from Amazon Bedrock.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Input Value:** Specifies the input text for text generation.
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
- **Model ID (Optional):** Specifies the model ID to be used for text generation. Defaults to `"anthropic.claude-instant-v1"`. Available options include:
|
||||
- `"ai21.j2-grande-instruct"`
|
||||
- `"ai21.j2-jumbo-instruct"`
|
||||
- `"ai21.j2-mid"`
|
||||
- `"ai21.j2-mid-v1"`
|
||||
- `"ai21.j2-ultra"`
|
||||
- `"ai21.j2-ultra-v1"`
|
||||
- `"anthropic.claude-instant-v1"`
|
||||
- `"anthropic.claude-v1"`
|
||||
- `"anthropic.claude-v2"`
|
||||
- `"cohere.command-text-v14"`
|
||||
- **Credentials Profile Name (Optional):** Specifies the name of the credentials profile.
|
||||
- **Region Name (Optional):** Specifies the region name.
|
||||
- **Model Kwargs (Optional):** Additional keyword arguments for the model.
|
||||
- **Endpoint URL (Optional):** Specifies the endpoint URL.
|
||||
- **Streaming (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **Cache (Optional):** Specifies whether to cache the response.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
Ensure that necessary credentials are provided to connect to the Amazon Bedrock API. If connection fails, a ValueError will be raised.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Anthropic {#a6ae46f98c4c4d389d44b8408bf151a1}
|
||||
|
||||
|
||||
This component allows the generation of text using Anthropic Chat & Completion large language models.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Model Name:** Specifies the name of the Anthropic model to be used for text generation. Available options include (and not limited to):
|
||||
- `"claude-2.1"`
|
||||
- `"claude-2.0"`
|
||||
- `"claude-instant-1.2"`
|
||||
- `"claude-instant-1"`
|
||||
- **Anthropic API Key:** Your Anthropic API key.
|
||||
- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to `256`.
|
||||
- **Temperature (Optional):** Specifies the sampling temperature. Defaults to `0.7`.
|
||||
- **API Endpoint (Optional):** Specifies the endpoint of the Anthropic API. Defaults to `"https://api.anthropic.com"`if not specified.
|
||||
- **Input Value:** Specifies the input text for text generation.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
For detailed documentation and integration guides, please refer to the [Anthropic Component Documentation](https://python.langchain.com/docs/integrations/chat/anthropic).
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Azure OpenAI {#7e3bff29ce714479b07feeb4445680cd}
|
||||
|
||||
|
||||
This component allows the generation of text using the LLM (Large Language Model) model from Azure OpenAI.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Model Name:** Specifies the name of the Azure OpenAI model to be used for text generation. Available options include:
|
||||
- `"gpt-35-turbo"`
|
||||
- `"gpt-35-turbo-16k"`
|
||||
- `"gpt-35-turbo-instruct"`
|
||||
- `"gpt-4"`
|
||||
- `"gpt-4-32k"`
|
||||
- `"gpt-4-vision"`
|
||||
- `"gpt-4o"`
|
||||
- **Azure Endpoint:** Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`.
|
||||
- **Deployment Name:** Specifies the name of the deployment.
|
||||
- **API Version:** Specifies the version of the Azure OpenAI API to be used. Available options include:
|
||||
- `"2023-03-15-preview"`
|
||||
- `"2023-05-15"`
|
||||
- `"2023-06-01-preview"`
|
||||
- `"2023-07-01-preview"`
|
||||
- `"2023-08-01-preview"`
|
||||
- `"2023-09-01-preview"`
|
||||
- `"2023-12-01-preview"`
|
||||
- **API Key:** Your Azure OpenAI API key.
|
||||
- **Temperature (Optional):** Specifies the sampling temperature. Defaults to `0.7`.
|
||||
- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to `1000`.
|
||||
- **Input Value:** Specifies the input text for text generation.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
For detailed documentation and integration guides, please refer to the [Azure OpenAI Component Documentation](https://python.langchain.com/docs/integrations/llms/azure_openai).
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Cohere {#706396a33bf94894966c95571252d78b}
|
||||
|
||||
|
||||
This component enables text generation using Cohere large language models.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Cohere API Key:** Your Cohere API key.
|
||||
- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to `256`.
|
||||
- **Temperature (Optional):** Specifies the sampling temperature. Defaults to `0.75`.
|
||||
- **Input Value:** Specifies the input text for text generation.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Google Generative AI {#074d9623463449f99d41b44699800e8a}
|
||||
|
||||
|
||||
This component enables text generation using Google Generative AI.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Google API Key:** Your Google API key to use for the Google Generative AI.
|
||||
- **Model:** The name of the model to use. Supported examples are `"gemini-pro"` and `"gemini-pro-vision"`.
|
||||
- **Max Output Tokens (Optional):** The maximum number of tokens to generate.
|
||||
- **Temperature:** Run inference with this temperature. Must be in the closed interval [0.0, 1.0].
|
||||
- **Top K (Optional):** Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.
|
||||
- **Top P (Optional):** The maximum cumulative probability of tokens to consider when sampling.
|
||||
- **N (Optional):** Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.
|
||||
- **Input Value:** The input to the model.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Hugging Face API {#c1267b9a6b36487cb2ee127ce9b64dbb}
|
||||
|
||||
|
||||
This component facilitates text generation using LLM models from the Hugging Face Inference API.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Endpoint URL:** The URL of the Hugging Face Inference API endpoint. Should be provided along with necessary authentication credentials.
|
||||
- **Task:** Specifies the task for text generation. Options include `"text2text-generation"`, `"text-generation"`, and `"summarization"`.
|
||||
- **API Token:** The API token required for authentication with the Hugging Face Hub.
|
||||
- **Model Keyword Arguments (Optional):** Additional keyword arguments for the model. Should be provided as a Python dictionary.
|
||||
- **Input Value:** The input text for text generation.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## LiteLLM Model {#9fb59dad3b294a05966320d39f483a50}
|
||||
|
||||
|
||||
Generates text using the `LiteLLM` collection of large language models.
|
||||
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Model name:** The name of the model to use. For example, `gpt-3.5-turbo`. (Type: str)
|
||||
- **API key:** The API key to use for accessing the provider's API. (Type: str, Optional)
|
||||
- **Provider:** The provider of the API key. (Type: str, Choices: "OpenAI", "Azure", "Anthropic", "Replicate", "Cohere", "OpenRouter")
|
||||
- **Temperature:** Controls the randomness of the text generation. (Type: float, Default: 0.7)
|
||||
- **Model kwargs:** Additional keyword arguments for the model. (Type: Dict, Optional)
|
||||
- **Top p:** Filter responses to keep the cumulative probability within the top p tokens. (Type: float, Optional)
|
||||
- **Top k:** Filter responses to only include the top k tokens. (Type: int, Optional)
|
||||
- **N:** Number of chat completions to generate for each prompt. (Type: int, Default: 1)
|
||||
- **Max tokens:** The maximum number of tokens to generate for each chat completion. (Type: int, Default: 256)
|
||||
- **Max retries:** Maximum number of retries for failed requests. (Type: int, Default: 6)
|
||||
- **Verbose:** Whether to print verbose output. (Type: bool, Default: False)
|
||||
- **Input:** The input prompt for text generation. (Type: str)
|
||||
- **Stream:** Whether to stream the output. (Type: bool, Default: False)
|
||||
- **System message:** System message to pass to the model. (Type: str, Optional)
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Ollama {#14e8e411d28d4711add53bfc3e52c6cd}
|
||||
|
||||
|
||||
Generate text using Ollama Local LLMs.
|
||||
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Base URL:** Endpoint of the Ollama API. Defaults to '[http://localhost:11434](http://localhost:11434/)' if not specified.
|
||||
- **Model Name:** The model name to use. Refer to [Ollama Library](https://ollama.ai/library) for more models.
|
||||
- **Temperature:** Controls the creativity of model responses. (Default: 0.8)
|
||||
- **Cache:** Enable or disable caching. (Default: False)
|
||||
- **Format:** Specify the format of the output (e.g., json). (Advanced)
|
||||
- **Metadata:** Metadata to add to the run trace. (Advanced)
|
||||
- **Mirostat:** Enable/disable Mirostat sampling for controlling perplexity. (Default: Disabled)
|
||||
- **Mirostat Eta:** Learning rate for Mirostat algorithm. (Default: None) (Advanced)
|
||||
- **Mirostat Tau:** Controls the balance between coherence and diversity of the output. (Default: None) (Advanced)
|
||||
- **Context Window Size:** Size of the context window for generating tokens. (Default: None) (Advanced)
|
||||
- **Number of GPUs:** Number of GPUs to use for computation. (Default: None) (Advanced)
|
||||
- **Number of Threads:** Number of threads to use during computation. (Default: None) (Advanced)
|
||||
- **Repeat Last N:** How far back the model looks to prevent repetition. (Default: None) (Advanced)
|
||||
- **Repeat Penalty:** Penalty for repetitions in generated text. (Default: None) (Advanced)
|
||||
- **TFS Z:** Tail free sampling value. (Default: None) (Advanced)
|
||||
- **Timeout:** Timeout for the request stream. (Default: None) (Advanced)
|
||||
- **Top K:** Limits token selection to top K. (Default: None) (Advanced)
|
||||
- **Top P:** Works together with top-k. (Default: None) (Advanced)
|
||||
- **Verbose:** Whether to print out response text.
|
||||
- **Tags:** Tags to add to the run trace. (Advanced)
|
||||
- **Stop Tokens:** List of tokens to signal the model to stop generating text. (Advanced)
|
||||
- **System:** System to use for generating text. (Advanced)
|
||||
- **Template:** Template to use for generating text. (Advanced)
|
||||
- **Input:** The input text.
|
||||
- **Stream:** Whether to stream the response.
|
||||
- **System Message:** System message to pass to the model. (Advanced)
|
||||
|
||||
---
|
||||
|
||||
|
||||
## OpenAI {#fe6cd793446748eda6eaad72e30f70b3}
|
||||
|
||||
|
||||
This component facilitates text generation using OpenAI's models.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Input Value:** The input text for text generation.
|
||||
- **Max Tokens (Optional):** The maximum number of tokens to generate. Defaults to `256`.
|
||||
- **Model Kwargs (Optional):** Additional keyword arguments for the model. Should be provided as a nested dictionary.
|
||||
- **Model Name (Optional):** The name of the model to use. Defaults to `gpt-4-1106-preview`. Supported options include: `gpt-4-turbo-preview`, `gpt-4-0125-preview`, `gpt-4-1106-preview`, `gpt-4-vision-preview`, `gpt-3.5-turbo-0125`, `gpt-3.5-turbo-1106`.
|
||||
- **OpenAI API Base (Optional):** The base URL of the OpenAI API. Defaults to `https://api.openai.com/v1`.
|
||||
- **OpenAI API Key (Optional):** The API key for accessing the OpenAI API.
|
||||
- **Temperature:** Controls the creativity of model responses. Defaults to `0.7`.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **System Message (Optional):** System message to pass to the model.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Qianfan {#6e4a6b2370ee4b9f8beb899e7cf9c8f6}
|
||||
|
||||
|
||||
This component facilitates the generation of text using Baidu Qianfan chat models.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Model Name:** Specifies the name of the Qianfan chat model to be used for text generation. Available options include:
|
||||
- `"ERNIE-Bot"`
|
||||
- `"ERNIE-Bot-turbo"`
|
||||
- `"BLOOMZ-7B"`
|
||||
- `"Llama-2-7b-chat"`
|
||||
- `"Llama-2-13b-chat"`
|
||||
- `"Llama-2-70b-chat"`
|
||||
- `"Qianfan-BLOOMZ-7B-compressed"`
|
||||
- `"Qianfan-Chinese-Llama-2-7B"`
|
||||
- `"ChatGLM2-6B-32K"`
|
||||
- `"AquilaChat-7B"`
|
||||
- **Qianfan Ak:** Your Baidu Qianfan access key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).
|
||||
- **Qianfan Sk:** Your Baidu Qianfan secret key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop).
|
||||
- **Top p (Optional):** Model parameter. Specifies the top-p value. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to `0.8`.
|
||||
- **Temperature (Optional):** Model parameter. Specifies the sampling temperature. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to `0.95`.
|
||||
- **Penalty Score (Optional):** Model parameter. Specifies the penalty score. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to `1.0`.
|
||||
- **Endpoint (Optional):** Endpoint of the Qianfan LLM, required if custom model is used.
|
||||
- **Input Value:** Specifies the input text for text generation.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **System Message (Optional):** A system message to pass to the model.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Vertex AI {#86b7d539e17c436fb758c47ec3ffb084}
|
||||
|
||||
|
||||
The `ChatVertexAI` is a component for generating text using Vertex AI Chat large language models API.
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
- **Credentials:** The JSON file containing the credentials for accessing the Vertex AI Chat API.
|
||||
- **Project:** The name of the project associated with the Vertex AI Chat API.
|
||||
- **Examples (Optional):** List of examples to provide context for text generation.
|
||||
- **Location:** The location of the Vertex AI Chat API service. Defaults to `us-central1`.
|
||||
- **Max Output Tokens:** The maximum number of tokens to generate. Defaults to `128`.
|
||||
- **Model Name:** The name of the model to use. Defaults to `chat-bison`.
|
||||
- **Temperature:** Controls the creativity of model responses. Defaults to `0.0`.
|
||||
- **Input Value:** The input text for text generation.
|
||||
- **Top K:** Limits token selection to top K. Defaults to `40`.
|
||||
- **Top P:** Works together with top-k. Defaults to `0.95`.
|
||||
- **Verbose:** Whether to print out response text. Defaults to `False`.
|
||||
- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to `False`.
|
||||
- **System Message (Optional):** System message to pass to the model.
|
||||
30
docs/docs/Components/components-prompts.md
Normal file
@@ -0,0 +1,30 @@
|
||||
---
|
||||
title: Prompts
|
||||
sidebar_position: 2
|
||||
slug: /components-prompts
|
||||
---
|
||||
|
||||
|
||||
|
||||
A prompt is the input provided to a language model, consisting of multiple components and can be parameterized using prompt templates. A prompt template offers a reproducible method for generating prompts, enabling easy customization through input variables.
|
||||
|
||||
|
||||
### Prompt {#c852d1761e6c46b19ce72e5f7c70958c}
|
||||
|
||||
|
||||
This component creates a prompt template with dynamic variables. This is useful for structuring prompts and passing dynamic data to a language model.
|
||||
|
||||
|
||||
**Parameters**
|
||||
|
||||
- **Template:** The template for the prompt. This field allows you to create other fields dynamically by using curly brackets `{}`. For example, if you have a template like `Hello {name}, how are you?`, a new field called `name` will be created. Prompt variables can be created with any name inside curly brackets, e.g. `{variable_name}`.
|
||||
|
||||
### PromptTemplate {#6e32412f062b42efbdf56857eafb3651}
|
||||
|
||||
|
||||
The `PromptTemplate` component enables users to create prompts and define variables that control how the model is instructed. Users can input a set of variables which the template uses to generate the prompt when a conversation starts.
|
||||
|
||||
|
||||
After defining a variable in the prompt template, it acts as its own component input.
|
||||
|
||||
- **template:** The template used to format an individual request.
|
||||
546
docs/docs/Components/components-vector-stores.md
Normal file
@@ -0,0 +1,546 @@
|
||||
---
|
||||
title: Vector Stores
|
||||
sidebar_position: 7
|
||||
slug: /components-vector-stores
|
||||
---
|
||||
|
||||
|
||||
|
||||
### Astra DB {#453bcf5664154e37a920f1b602bd39da}
|
||||
|
||||
|
||||
The `Astra DB` initializes a vector store using Astra DB from Data. It creates Astra DB-based vector indexes to efficiently store and retrieve documents.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Input:** Documents or Data for input.
|
||||
- **Embedding or Astra vectorize:** External or server-side model Astra DB uses.
|
||||
- **Collection Name:** Name of the Astra DB collection.
|
||||
- **Token:** Authentication token for Astra DB.
|
||||
- **API Endpoint:** API endpoint for Astra DB.
|
||||
- **Namespace:** Astra DB namespace.
|
||||
- **Metric:** Metric used by Astra DB.
|
||||
- **Batch Size:** Batch size for operations.
|
||||
- **Bulk Insert Batch Concurrency:** Concurrency level for bulk inserts.
|
||||
- **Bulk Insert Overwrite Concurrency:** Concurrency level for overwriting during bulk inserts.
|
||||
- **Bulk Delete Concurrency:** Concurrency level for bulk deletions.
|
||||
- **Setup Mode:** Setup mode for the vector store.
|
||||
- **Pre Delete Collection:** Option to delete the collection before setup.
|
||||
- **Metadata Indexing Include:** Fields to include in metadata indexing.
|
||||
- **Metadata Indexing Exclude:** Fields to exclude from metadata indexing.
|
||||
- **Collection Indexing Policy:** Indexing policy for the collection.
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
Ensure you configure the necessary Astra DB token and API endpoint before starting.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Astra DB Search {#26f25d1933a9459bad2d6725f87beb11}
|
||||
|
||||
|
||||
`Astra DBSearch` searches an existing Astra DB vector store for documents similar to the input. It uses the `Astra DB` component's functionality for efficient retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Search Type:** Type of search, such as Similarity or MMR.
|
||||
- **Input Value:** Value to search for.
|
||||
- **Embedding or Astra vectorize:** External or server-side model Astra DB uses.
|
||||
- **Collection Name:** Name of the Astra DB collection.
|
||||
- **Token:** Authentication token for Astra DB.
|
||||
- **API Endpoint:** API endpoint for Astra DB.
|
||||
- **Namespace:** Astra DB namespace.
|
||||
- **Metric:** Metric used by Astra DB.
|
||||
- **Batch Size:** Batch size for operations.
|
||||
- **Bulk Insert Batch Concurrency:** Concurrency level for bulk inserts.
|
||||
- **Bulk Insert Overwrite Concurrency:** Concurrency level for overwriting during bulk inserts.
|
||||
- **Bulk Delete Concurrency:** Concurrency level for bulk deletions.
|
||||
- **Setup Mode:** Setup mode for the vector store.
|
||||
- **Pre Delete Collection:** Option to delete the collection before setup.
|
||||
- **Metadata Indexing Include:** Fields to include in metadata indexing.
|
||||
- **Metadata Indexing Exclude:** Fields to exclude from metadata indexing.
|
||||
- **Collection Indexing Policy:** Indexing policy for the collection.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Chroma {#74730795605143cba53e1f4c4f2ef5d6}
|
||||
|
||||
|
||||
`Chroma` sets up a vector store using Chroma for efficient vector storage and retrieval within language processing workflows.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Collection Name:** Name of the collection.
|
||||
- **Persist Directory:** Directory to persist the Vector Store.
|
||||
- **Server CORS Allow Origins (Optional):** CORS allow origins for the Chroma server.
|
||||
- **Server Host (Optional):** Host for the Chroma server.
|
||||
- **Server Port (Optional):** Port for the Chroma server.
|
||||
- **Server gRPC Port (Optional):** gRPC port for the Chroma server.
|
||||
- **Server SSL Enabled (Optional):** SSL configuration for the Chroma server.
|
||||
- **Input:** Input data for creating the Vector Store.
|
||||
- **Embedding:** Embeddings used for the Vector Store.
|
||||
|
||||
For detailed documentation and integration guides, please refer to the [Chroma Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/chroma).
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Chroma Search {#5718072a155441f3a443b944ad4d638f}
|
||||
|
||||
|
||||
`ChromaSearch` searches a Chroma collection for documents similar to the input text. It leverages Chroma to ensure efficient document retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Input:** Input text for search.
|
||||
- **Search Type:** Type of search, such as Similarity or MMR.
|
||||
- **Collection Name:** Name of the Chroma collection.
|
||||
- **Index Directory:** Directory where the Chroma index is stored.
|
||||
- **Embedding:** Embedding model used for vectorization.
|
||||
- **Server CORS Allow Origins (Optional):** CORS allow origins for the Chroma server.
|
||||
- **Server Host (Optional):** Host for the Chroma server.
|
||||
- **Server Port (Optional):** Port for the Chroma server.
|
||||
- **Server gRPC Port (Optional):** gRPC port for the Chroma server.
|
||||
- **Server SSL Enabled (Optional):** SSL configuration for the Chroma server.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Couchbase {#6900a79347164f35af27ae27f0d64a6d}
|
||||
|
||||
|
||||
`Couchbase` builds a Couchbase vector store from Data, streamlining the storage and retrieval of documents.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Embedding:** Model used by Couchbase.
|
||||
- **Input:** Documents or Data.
|
||||
- **Couchbase Cluster Connection String:** Cluster Connection string.
|
||||
- **Couchbase Cluster Username:** Cluster Username.
|
||||
- **Couchbase Cluster Password:** Cluster Password.
|
||||
- **Bucket Name:** Bucket identifier in Couchbase.
|
||||
- **Scope Name:** Scope identifier in Couchbase.
|
||||
- **Collection Name:** Collection identifier in Couchbase.
|
||||
- **Index Name:** Index identifier.
|
||||
|
||||
For detailed documentation and integration guides, please refer to the [Couchbase Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/couchbase).
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Couchbase Search {#c77bb09425a3426f9677d38d8237d9ba}
|
||||
|
||||
|
||||
`CouchbaseSearch` leverages the Couchbase component to search for documents based on similarity metrics.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Input:** Search query.
|
||||
- **Embedding:** Model used in the Vector Store.
|
||||
- **Couchbase Cluster Connection String:** Cluster Connection string.
|
||||
- **Couchbase Cluster Username:** Cluster Username.
|
||||
- **Couchbase Cluster Password:** Cluster Password.
|
||||
- **Bucket Name:** Bucket identifier.
|
||||
- **Scope Name:** Scope identifier.
|
||||
- **Collection Name:** Collection identifier in Couchbase.
|
||||
- **Index Name:** Index identifier.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### FAISS {#5b3f4e6592a847b69e07df2f674a03f0}
|
||||
|
||||
|
||||
The `FAISS` component manages document ingestion into a FAISS Vector Store, optimizing document indexing and retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Embedding:** Model used for vectorizing inputs.
|
||||
- **Input:** Documents to ingest.
|
||||
- **Folder Path:** Save path for the FAISS index, relative to Langflow.
|
||||
|
||||
For more details, see the [FAISS Component Documentation](https://faiss.ai/index.html).
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### FAISS Search {#81ff12d7205940a3b14e3ddf304630f8}
|
||||
|
||||
|
||||
`FAISSSearch` searches a FAISS Vector Store for documents similar to a given input, using similarity metrics for efficient retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Embedding:** Model used in the FAISS Vector Store.
|
||||
- **Folder Path:** Path to load the FAISS index from, relative to Langflow.
|
||||
- **Input:** Search query.
|
||||
- **Index Name:** Index identifier.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### MongoDB Atlas {#eba8892f7a204b97ad1c353e82948149}
|
||||
|
||||
|
||||
`MongoDBAtlas` builds a MongoDB Atlas-based vector store from Data, streamlining the storage and retrieval of documents.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Embedding:** Model used by MongoDB Atlas.
|
||||
- **Input:** Documents or Data.
|
||||
- **Collection Name:** Collection identifier in MongoDB Atlas.
|
||||
- **Database Name:** Database identifier.
|
||||
- **Index Name:** Index identifier.
|
||||
- **MongoDB Atlas Cluster URI:** Cluster URI.
|
||||
- **Search Kwargs:** Additional search parameters.
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
Ensure pymongo is installed for using MongoDB Atlas Vector Store.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### MongoDB Atlas Search {#686ba0e30a54438cbc7153b81ee4b1df}
|
||||
|
||||
|
||||
`MongoDBAtlasSearch` leverages the MongoDBAtlas component to search for documents based on similarity metrics.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Search Type:** Type of search, such as "Similarity" or "MMR".
|
||||
- **Input:** Search query.
|
||||
- **Embedding:** Model used in the Vector Store.
|
||||
- **Collection Name:** Collection identifier.
|
||||
- **Database Name:** Database identifier.
|
||||
- **Index Name:** Index identifier.
|
||||
- **MongoDB Atlas Cluster URI:** Cluster URI.
|
||||
- **Search Kwargs:** Additional search parameters.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### PGVector {#7ceebdd84ab14f8e8589c13c58370e5b}
|
||||
|
||||
|
||||
`PGVector` integrates a Vector Store within a PostgreSQL database, allowing efficient storage and retrieval of vectors.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Input:** Value for the Vector Store.
|
||||
- **Embedding:** Model used.
|
||||
- **PostgreSQL Server Connection String:** Server URL.
|
||||
- **Table:** Table name in the PostgreSQL database.
|
||||
|
||||
For more details, see the [PGVector Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/pgvector).
|
||||
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
Ensure the PostgreSQL server is accessible and configured correctly.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### PGVector Search {#196bf22ea2844bdbba971b5082750943}
|
||||
|
||||
|
||||
`PGVectorSearch` extends `PGVector` to search for documents based on similarity metrics.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Input:** Search query.
|
||||
- **Embedding:** Model used.
|
||||
- **PostgreSQL Server Connection String:** Server URL.
|
||||
- **Table:** Table name.
|
||||
- **Search Type:** Type of search, such as "Similarity" or "MMR".
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Pinecone {#67abbe3e27c34fb4bcb35926ce831727}
|
||||
|
||||
|
||||
`Pinecone` constructs a Pinecone wrapper from Data, setting up Pinecone-based vector indexes for document storage and retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Input:** Documents or Data.
|
||||
- **Embedding:** Model used.
|
||||
- **Index Name:** Index identifier.
|
||||
- **Namespace:** Namespace used.
|
||||
- **Pinecone API Key:** API key.
|
||||
- **Pinecone Environment:** Environment settings.
|
||||
- **Search Kwargs:** Additional search parameters.
|
||||
- **Pool Threads:** Number of threads.
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
Ensure the Pinecone API key and environment are correctly configured.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Pinecone Search {#977944558cad4cf2ba332ea4f06bf485}
|
||||
|
||||
|
||||
`PineconeSearch` searches a Pinecone Vector Store for documents similar to the input, using advanced similarity metrics.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Search Type:** Type of search, such as "Similarity" or "MMR".
|
||||
- **Input Value:** Search query.
|
||||
- **Embedding:** Model used.
|
||||
- **Index Name:** Index identifier.
|
||||
- **Namespace:** Namespace used.
|
||||
- **Pinecone API Key:** API key.
|
||||
- **Pinecone Environment:** Environment settings.
|
||||
- **Search Kwargs:** Additional search parameters.
|
||||
- **Pool Threads:** Number of threads.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Qdrant {#88df77f3044e4ac6980950835a919fb0}
|
||||
|
||||
|
||||
`Qdrant` allows efficient similarity searches and retrieval operations, using a list of texts to construct a Qdrant wrapper.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Input:** Documents or Data.
|
||||
- **Embedding:** Model used.
|
||||
- **API Key:** Qdrant API key.
|
||||
- **Collection Name:** Collection identifier.
|
||||
- **Advanced Settings:** Includes content payload key, distance function, gRPC port, host, HTTPS, location, metadata payload key, path, port, prefer gRPC, prefix, search kwargs, timeout, URL.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Qdrant Search {#5ba5f8dca0f249d7ad00778f49901e6c}
|
||||
|
||||
|
||||
`QdrantSearch` extends `Qdrant` to search for documents similar to the input based on advanced similarity metrics.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Search Type:** Type of search, such as "Similarity" or "MMR".
|
||||
- **Input Value:** Search query.
|
||||
- **Embedding:** Model used.
|
||||
- **API Key:** Qdrant API key.
|
||||
- **Collection Name:** Collection identifier.
|
||||
- **Advanced Settings:** Includes content payload key, distance function, gRPC port, host, HTTPS, location, metadata payload key, path, port, prefer gRPC, prefix, search kwargs, timeout, URL.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Redis {#a0fb8a9d244a40eb8439d0f8c22a2562}
|
||||
|
||||
|
||||
`Redis` manages a Vector Store in a Redis database, supporting efficient vector storage and retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Index Name:** Default index name.
|
||||
- **Input:** Data for building the Redis Vector Store.
|
||||
- **Embedding:** Model used.
|
||||
- **Schema:** Optional schema file (.yaml) for document structure.
|
||||
- **Redis Server Connection String:** Server URL.
|
||||
- **Redis Index:** Optional index name.
|
||||
|
||||
For detailed documentation, refer to the [Redis Documentation](https://python.langchain.com/docs/integrations/vectorstores/redis).
|
||||
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
Ensure the Redis server URL and index name are configured correctly. Provide a schema if no documents are available.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Redis Search {#80aea4da515f490e979c8576099ee880}
|
||||
|
||||
|
||||
`RedisSearch` searches a Redis Vector Store for documents similar to the input.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Search Type:** Type of search, such as "Similarity" or "MMR".
|
||||
- **Input Value:** Search query.
|
||||
- **Index Name:** Default index name.
|
||||
- **Embedding:** Model used.
|
||||
- **Schema:** Optional schema file (.yaml) for document structure.
|
||||
- **Redis Server Connection String:** Server URL.
|
||||
- **Redis Index:** Optional index name.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Supabase {#e86fb3cc507e4b5494f0a421f94e853b}
|
||||
|
||||
|
||||
`Supabase` initializes a Supabase Vector Store from texts and embeddings, setting up an environment for efficient document retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Input:** Documents or data.
|
||||
- **Embedding:** Model used.
|
||||
- **Query Name:** Optional query name.
|
||||
- **Search Kwargs:** Advanced search parameters.
|
||||
- **Supabase Service Key:** Service key.
|
||||
- **Supabase URL:** Instance URL.
|
||||
- **Table Name:** Optional table name.
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
Ensure the Supabase service key, URL, and table name are properly configured.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Supabase Search {#fd02d550b9b2457f91f2f4073656cb09}
|
||||
|
||||
|
||||
`SupabaseSearch` searches a Supabase Vector Store for documents similar to the input.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Search Type:** Type of search, such as "Similarity" or "MMR".
|
||||
- **Input Value:** Search query.
|
||||
- **Embedding:** Model used.
|
||||
- **Query Name:** Optional query name.
|
||||
- **Search Kwargs:** Advanced search parameters.
|
||||
- **Supabase Service Key:** Service key.
|
||||
- **Supabase URL:** Instance URL.
|
||||
- **Table Name:** Optional table name.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Vectara {#b4e05230b62a47c792a89c5511af97ac}
|
||||
|
||||
|
||||
`Vectara` sets up a Vectara Vector Store from files or upserted data, optimizing document retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Vectara Customer ID:** Customer ID.
|
||||
- **Vectara Corpus ID:** Corpus ID.
|
||||
- **Vectara API Key:** API key.
|
||||
- **Files Url:** Optional URLs for file initialization.
|
||||
- **Input:** Optional data for corpus upsert.
|
||||
|
||||
For more information, consult the [Vectara Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/vectara).
|
||||
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
If inputs or files_url are provided, they will be processed accordingly.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Vectara Search {#31a47221c23f4fbba4a7465cf1d89eb0}
|
||||
|
||||
|
||||
`VectaraSearch` searches a Vectara Vector Store for documents based on the provided input.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Search Type:** Type of search, such as "Similarity" or "MMR".
|
||||
- **Input Value:** Search query.
|
||||
- **Vectara Customer ID:** Customer ID.
|
||||
- **Vectara Corpus ID:** Corpus ID.
|
||||
- **Vectara API Key:** API key.
|
||||
- **Files Url:** Optional URLs for file initialization.
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Weaviate {#57c7969574b1418dbb079ac5fc8cd857}
|
||||
|
||||
|
||||
`Weaviate` facilitates a Weaviate Vector Store setup, optimizing text and document indexing and retrieval.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Weaviate URL:** Default instance URL.
|
||||
- **Search By Text:** Indicates whether to search by text.
|
||||
- **API Key:** Optional API key for authentication.
|
||||
- **Index Name:** Optional index name.
|
||||
- **Text Key:** Default text extraction key.
|
||||
- **Input:** Document or record.
|
||||
- **Embedding:** Model used.
|
||||
- **Attributes:** Optional additional attributes.
|
||||
|
||||
For more details, see the [Weaviate Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/weaviate).
|
||||
|
||||
|
||||
NOTE
|
||||
|
||||
|
||||
Ensure Weaviate instance is running and accessible. Verify API key, index name, text key, and attributes are set correctly.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
### Weaviate Search {#6d4e616dfd6143b28dc055bc1c40ecae}
|
||||
|
||||
|
||||
`WeaviateSearch` searches a Weaviate Vector Store for documents similar to the input.
|
||||
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- **Search Type:** Type of search, such as "Similarity" or "MMR".
|
||||
- **Input Value:** Search query.
|
||||
- **Weaviate URL:** Default instance URL.
|
||||
- **Search By Text:** Indicates whether to search by text.
|
||||
- **API Key:** Optional API key for authentication.
|
||||
- **Index Name:** Optional index name.
|
||||
- **Text Key:** Default text extraction key.
|
||||
- **Embedding:** Model used.
|
||||
- **Attributes:** Optional additional attributes.
|
||||
87
docs/docs/Components/components.md
Normal file
@@ -0,0 +1,87 @@
|
||||
---
|
||||
title: Intro to Components
|
||||
sidebar_position: 0
|
||||
slug: /components
|
||||
---
|
||||
|
||||
|
||||
|
||||
## Component {#0323a728d8314767adb907b998036bb4}
|
||||
|
||||
|
||||
A component is a single building block within a flow. It consists of inputs, outputs, and parameters that define their functionality. These elements provide a convenient and straightforward way to compose LLM-based applications. Learn more about components and how they work below.
|
||||
|
||||
|
||||
During the flow creation process, you will notice handles (colored circles) attached to one or both sides of a component. These handles use distinct colors to indicate the types of inputs and outputs that can be interconnected. Hover over a handle to see connection details.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
On the top right corner of the component, you'll find a play button to run a component. Once it runs, a status icon appears and you can hover over that to visualize success or error messages. Start interacting with your AI by clicking the **Playground** at the bottom right of the workspace.
|
||||
|
||||
|
||||
### Component Menu {#7e3f2f8ff5074b2fb3eee97c9cfaabe7}
|
||||
|
||||
|
||||
Each component is unique, but they all have a menu bar at the top that looks something like this.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
It consists of options such as:
|
||||
|
||||
- **Code** — displays the component's Python code. You can modify the code and save it.
|
||||
- **Advanced** — See and adjust all parameters of a component.
|
||||
- **Freeze** — After a component runs, lock its previous output state to prevent it from re-running.
|
||||
|
||||
Click **All** (the "..." button) to see all options.
|
||||
|
||||
|
||||
### Output Preview {#ed7b3c34e0774b8a916b0e68821c9a7a}
|
||||
|
||||
|
||||
Langflow includes an output visualizer for components that opens a pop-up screen. This allows you to easily inspect and monitor transmissions between components, providing instant feedback on your workflows.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
### Advanced Settings {#b6430d4903df44f0ba4618a558c83d7b}
|
||||
|
||||
|
||||
Langflow components can be edited by clicking the **Advanced Settings** button.
|
||||
|
||||
|
||||
Hide parameters with the **Show** button to reduce complexity and keep the workspace clean and intuitive for experimentation.
|
||||
|
||||
|
||||
You can also double-click a component's name and description to modify those. Component descriptions accept markdown syntax.
|
||||
|
||||
|
||||
### Group Components {#c3f5ed818e3b40ceb6534dc358e1a5f2}
|
||||
|
||||
|
||||
Multiple components can be grouped into a single component for reuse. This is useful when combining large flows into single components (like RAG with a vector database, for example) and saving space.
|
||||
|
||||
1. Hold **Shift** and drag to select components.
|
||||
2. Select **Group**.
|
||||
3. The components merge into a single component.
|
||||
4. Double-click the name and description to change them.
|
||||
5. Save your grouped component in the sidebar for later use!
|
||||
|
||||
[group video here]
|
||||
|
||||
|
||||
### Component Version {#887fd587589448dc8c27336d1c235b9b}
|
||||
|
||||
|
||||
A component's state is stored in a database, while sidebar components are like starter templates. As soon as you drag a component from the sidebar to the workspace, the two components are no longer in parity.
|
||||
|
||||
|
||||
The component will keep the version number it was initialized to the workspace with. Click the **Update Component** icon (exclamation mark) to bring the component up to the `latest` version. This will change the code of the component in place so you can validate that the component was updated by checking its Python code before and after updating it.
|
||||
|
||||
|
||||

|
||||
|
||||
|
Before Width: | Height: | Size: 67 KiB After Width: | Height: | Size: 67 KiB |
BIN
docs/docs/Configuration/1125619904.png
Normal file
|
After Width: | Height: | Size: 43 KiB |
|
Before Width: | Height: | Size: 350 KiB After Width: | Height: | Size: 350 KiB |
|
Before Width: | Height: | Size: 341 KiB After Width: | Height: | Size: 341 KiB |
BIN
docs/docs/Configuration/1926471667.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
|
Before Width: | Height: | Size: 171 KiB After Width: | Height: | Size: 171 KiB |
|
Before Width: | Height: | Size: 32 KiB After Width: | Height: | Size: 32 KiB |
|
Before Width: | Height: | Size: 2.9 KiB After Width: | Height: | Size: 2.9 KiB |
BIN
docs/docs/Configuration/711485342.gif
Normal file
|
After Width: | Height: | Size: 833 KiB |
BIN
docs/docs/Configuration/945175915.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
96
docs/docs/Configuration/Folders.md
Normal file
@@ -0,0 +1,96 @@
|
||||
---
|
||||
title: Folders
|
||||
sidebar_position: 3
|
||||
slug: /365085a8-a90a-43f9-a779-f8769ec7eca1
|
||||
---
|
||||
|
||||
|
||||
|
||||
## **Collections and Projects** {#a259c11235144b18b3ae7a8265a2e6f5}
|
||||
|
||||
|
||||
My Collection is a space in Langflow where users can manage, organize, and access their flows and components. Flows and components are displayed as individual cards that provide relevant information.
|
||||
|
||||
|
||||

|
||||
|
||||
- **Folders**: Users can organize their projects into folders. Default folders include "My Projects" and the ability to create new folders. Hover over a folder to access options to download or delete it.
|
||||
- **Search Bar**: Enables users to quickly search through their flows and components.
|
||||
- **Select All**: This feature allows users to select all projects displayed on the page for batch actions like moving, deleting, or exporting.
|
||||
|
||||
Click on a flow card to open it in Langflow Workspace or use the **Playground Button** for direct access to execute and interact with the flow’s chatbot interface.
|
||||
|
||||
|
||||
## Folders {#776a3866273f4efbbbb2febdfc1baa12}
|
||||
|
||||
|
||||
Folders can help you keep your projects organized in Langflow. They help you manage and categorize your work efficiently, making it easier to find and access the resources you need.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
Multiple projects can be stored in **folders**.
|
||||
|
||||
|
||||
Folders allow you to categorize flows and components into manageable groups. This makes it easier to find and access specific projects quickly.
|
||||
|
||||
|
||||
**My Projects** is a default folder where all new projects and components are initially stored unless specified otherwise. Users can create custom folders to better organize their work according to specific needs.
|
||||
|
||||
|
||||
Hovering over a folder in Langflow provides options to either remove or download the entire folder, allowing you to keep an offline copy or migrate projects between environments.
|
||||
|
||||
|
||||
Create new folders with the **New folder** button. One folder can store multiple projects (as the default My Projects folder does).
|
||||
|
||||
|
||||
You can download folders of projects as a single JSON file, and upload files and flows to your folder.
|
||||
|
||||
|
||||
Click the **Trash** icon to delete a folder.
|
||||
|
||||
|
||||
|
||||
### How to Create Folders {#5ba5abe995c843e4a429e41413f9d539}
|
||||
|
||||
1. **Navigate to the Home Screen:**
|
||||
- Go to your Langflow Home Page (outside of projects).
|
||||
2. **Create a New Folder:**
|
||||
- Click on the "New Folder" button.
|
||||
|
||||

|
||||
|
||||
- Double-click the new folder created to rename your folder appropriately to reflect its contents.
|
||||
|
||||

|
||||
|
||||
3. **Move Files:**
|
||||
- Drag and drop files into the corresponding folders and subfolders to keep everything organized.
|
||||
|
||||

|
||||
|
||||
|
||||
### Best Practices for Organizing Folders {#66f23f8e129a48598a7bb4565a508360}
|
||||
|
||||
- **Categorize by Project:** Create a main folder for each project, then add projects for different aspects such as research, drafts, and final documents.
|
||||
- **Use Descriptive Names:** Use clear and descriptive names for your folders to easily identify their contents at a glance.
|
||||
|
||||
### Example Structure {#ebe6acad99c24d6f9aaabf18e4a17ff4}
|
||||
|
||||
|
||||
Here's an example of how you might organize folders and subfolders for a Langflow project:
|
||||
|
||||
|
||||
```text
|
||||
Langflow
|
||||
├── Research
|
||||
│ ├── Articles Project
|
||||
│ ├── Data Project
|
||||
│ └── Notes Project
|
||||
└── Documents
|
||||
├── RAG Project
|
||||
└── Advanced RAG Project
|
||||
|
||||
```
|
||||
|
||||
1
docs/docs/Configuration/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":8, "label":"Configuration"}
|
||||
@@ -1,47 +1,53 @@
|
||||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
import Admonition from "@theme/Admonition";
|
||||
---
|
||||
title: API Keys
|
||||
sidebar_position: 1
|
||||
slug: /configuration-api-keys
|
||||
---
|
||||
|
||||
# API Keys
|
||||
|
||||
<Admonition type="warning" title="warning">
|
||||
|
||||
:::caution
|
||||
|
||||
This page may contain outdated information. It will be updated as soon as possible.
|
||||
</Admonition>
|
||||
|
||||
:::
|
||||
|
||||
|
||||
|
||||
|
||||
Langflow provides an API key functionality that allows users to access their individual components and flows without traditional login authentication. The API key is a user-specific token that can be included in the request header or query parameter to authenticate API calls. This documentation outlines how to generate, use, and manage API keys in Langflow.
|
||||
|
||||
<Admonition type="warning">
|
||||
The default user and password are set using the LANGFLOW_SUPERUSER and
|
||||
LANGFLOW_SUPERUSER_PASSWORD environment variables.
|
||||
|
||||
The default values are `langflow` and `langflow`, respectively.
|
||||
:::caution
|
||||
|
||||
</Admonition>
|
||||
The default user and password are set using the LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD environment variables. The default values are langflow and langflow, respectively.
|
||||
|
||||
:::
|
||||
|
||||
|
||||
|
||||
|
||||
## Generate an API key {#c29986a69cad4cdbbe7537e383ea7207}
|
||||
|
||||
## Generate an API key
|
||||
|
||||
Generate a user-specific token to use with Langflow.
|
||||
|
||||
### Generate an API key with the Langflow UI
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/api-key.png"),
|
||||
dark: useBaseUrl("img/api-key.png"),
|
||||
}}
|
||||
style={{ width: "50%", maxWidth: "600px", margin: "0 auto" }}
|
||||
/>
|
||||
### Generate an API key with the Langflow UI {#3d90098ddd7c44b6836c0273acf57123}
|
||||
|
||||
1. Click on the "API Key" icon.
|
||||
|
||||

|
||||
|
||||
2. Click on "Create new secret key".
|
||||
3. Give it an optional name.
|
||||
4. Click on "Create secret key".
|
||||
5. Copy the API key and store it in a secure location.
|
||||
|
||||
### Generate an API key with the Langflow CLI
|
||||
### Generate an API key with the Langflow CLI {#2368f62fc4b8477e8080c9c2d3659d76}
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
langflow api-key
|
||||
# or
|
||||
python -m langflow api-key
|
||||
@@ -55,31 +61,40 @@ python -m langflow api-key
|
||||
│ │
|
||||
│ The API key has been copied to your clipboard. Cmd + V to paste it. │
|
||||
╰──────────────────────────────
|
||||
|
||||
```
|
||||
|
||||
## Use the Langflow API key
|
||||
|
||||
## Use the Langflow API key {#ae787e4b0d3846aa9094fac75e0ac04f}
|
||||
|
||||
|
||||
Include your API key in API requests to authenticate requests to Langflow.
|
||||
|
||||
### Use the `x-api-key` header
|
||||
|
||||
### Use the `x-api-key` header {#70965b3ad24d467ca4f90e7c13a1f394}
|
||||
|
||||
|
||||
Include the `x-api-key` in the HTTP header when making API requests:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
http://localhost:3000/api/v1/run/<your_flow_id> \
|
||||
-H 'Content-Type: application/json'\
|
||||
-H 'x-api-key: <your api key>'\
|
||||
|
||||
```shell
|
||||
curl -X POST \\
|
||||
<http://localhost:3000/api/v1/run/><your_flow_id> \\
|
||||
-H 'Content-Type: application/json'\\
|
||||
-H 'x-api-key: <your api key>'\\
|
||||
-d '{"inputs": {"text":""}, "tweaks": {}}'
|
||||
|
||||
```
|
||||
|
||||
|
||||
With Python using `requests`:
|
||||
|
||||
|
||||
```python
|
||||
import requests
|
||||
from typing import Optional
|
||||
|
||||
BASE_API_URL = "http://localhost:3001/api/v1/process"
|
||||
BASE_API_URL = "<http://localhost:3001/api/v1/process>"
|
||||
FLOW_ID = "4441b773-0724-434e-9cee-19d995d8f2df"
|
||||
# You can tweak the flow by adding a tweaks dictionary
|
||||
# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}}
|
||||
@@ -114,25 +129,32 @@ def run_flow(inputs: dict,
|
||||
inputs = {"text":""}
|
||||
api_key = "<your api key>"
|
||||
print(run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS, apiKey=api_key))
|
||||
|
||||
```
|
||||
|
||||
### Use the query parameter
|
||||
|
||||
### Use the query parameter {#febb797f3bb5403b9f070afc0fa4f453}
|
||||
|
||||
|
||||
Include the API key as a query parameter in the URL:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
http://localhost:3000/api/v1/process/<your_flow_id>?x-api-key=<your_api_key> \
|
||||
-H 'Content-Type: application/json'\
|
||||
|
||||
```shell
|
||||
curl -X POST \\
|
||||
<http://localhost:3000/api/v1/process/><your_flow_id>?x-api-key=<your_api_key> \\
|
||||
-H 'Content-Type: application/json'\\
|
||||
-d '{"inputs": {"text":""}, "tweaks": {}}'
|
||||
|
||||
```
|
||||
|
||||
|
||||
With Python using `requests`:
|
||||
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
BASE_API_URL = "http://localhost:3001/api/v1/process"
|
||||
BASE_API_URL = "<http://localhost:3001/api/v1/process>"
|
||||
FLOW_ID = "4441b773-0724-434e-9cee-19d995d8f2df"
|
||||
# You can tweak the flow by adding a tweaks dictionary
|
||||
# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}}
|
||||
@@ -167,17 +189,23 @@ def run_flow(inputs: dict,
|
||||
inputs = {"text":""}
|
||||
api_key = "<your api key>"
|
||||
print(run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS, apiKey=api_key))
|
||||
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
## Security Considerations {#1273eb69a61344d19827b30dba46dfd5}
|
||||
|
||||
- **Visibility**: For security reasons, the API key cannot be retrieved again through the UI.
|
||||
- **Scope**: The key allows access only to the flows and components of the specific user to whom it was issued.
|
||||
|
||||
## Custom API endpoint
|
||||
## Custom API endpoint {#da933a86690a4fdeac24024472caf8a9}
|
||||
|
||||
Under **Project Settings** > **Endpoint Name**, you can pick a custom name for the endpoint used to call your flow from the API.
|
||||
|
||||
## Revoke an API Key
|
||||
Under **Project Settings** > **Endpoint Name**, you can pick a custom name for the endpoint used to call your flow from the API.
|
||||
|
||||
|
||||
## Revoke an API Key {#f0ea41ea167845cea91bb5e8f90d9df0}
|
||||
|
||||
|
||||
To revoke an API key, delete it from the UI. This action immediately invalidates the key and prevents it from being used again.
|
||||
|
||||
@@ -1,18 +1,24 @@
|
||||
import ThemedImage from "@theme/ThemedImage";
|
||||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
import ReactPlayer from "react-player";
|
||||
import Admonition from "@theme/Admonition";
|
||||
---
|
||||
title: Authentication
|
||||
sidebar_position: 0
|
||||
slug: /configuration-authentication
|
||||
---
|
||||
|
||||
# Sign Up and Sign In
|
||||
|
||||
<Admonition type="warning" title="warning">
|
||||
This page may contain outdated information. It will be updated as soon as possible.
|
||||
</Admonition>
|
||||
|
||||
## Sign Up and Sign In {#f480dac5d2094d75a433de0b8e195641}
|
||||
|
||||
|
||||
> ⚠️ WARNING
|
||||
> This page may contain outdated information. It will be updated as soon as possible.
|
||||
>
|
||||
|
||||
|
||||
The login functionality in Langflow serves to authenticate users and protect sensitive routes in the application. Starting from version 0.5, Langflow introduces an enhanced login mechanism that is governed by a few environment variables. This allows new secure features.
|
||||
|
||||
## Environment variables
|
||||
|
||||
## Environment variables {#3ed7cae6f5324ba0ac14783cf2a6cc07}
|
||||
|
||||
|
||||
The following environment variables are crucial in configuring the login settings:
|
||||
|
||||
@@ -24,76 +30,93 @@ The following environment variables are crucial in configuring the login setting
|
||||
|
||||
All of these variables can be passed to the CLI command _`langflow run`_ through the _`--env-file`_ option. For example:
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
langflow run --env-file .env
|
||||
|
||||
```
|
||||
|
||||
<Admonition type="info">
|
||||
It is critical not to expose these environment variables in your code
|
||||
repository. Always set them securely in your deployment environment, for
|
||||
example, using Docker secrets, Kubernetes ConfigMaps/Secrets, or dedicated
|
||||
secure environment configuration systems like AWS Secrets Manager.
|
||||
</Admonition>
|
||||
|
||||
### _`LANGFLOW_AUTO_LOGIN`_
|
||||
> ℹ︎ INFO
|
||||
> It is critical not to expose these environment variables in your code repository. Always set them securely in your deployment environment, for example, using Docker secrets, Kubernetes ConfigMaps/Secrets, or dedicated secure environment configuration systems like AWS Secrets Manager.
|
||||
>
|
||||
|
||||
|
||||
### _`LANGFLOW_AUTO_LOGIN`_ {#8b10059e0fbc44f3bc8ce63fe7692e7e}
|
||||
|
||||
|
||||
By default, this variable is set to `True`. When enabled (`True`), Langflow operates as it did in versions prior to 0.5—automatic login without requiring explicit user authentication.
|
||||
|
||||
|
||||
To disable automatic login and enforce user authentication:
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
export LANGFLOW_AUTO_LOGIN=False
|
||||
```
|
||||
|
||||
### _`LANGFLOW_SUPERUSER`_ and _`LANGFLOW_SUPERUSER_PASSWORD`_
|
||||
|
||||
### _`LANGFLOW_SUPERUSER`_ and _`LANGFLOW_SUPERUSER_PASSWORD`_ {#a61a651a0fc7443a82cec93c07a14503}
|
||||
|
||||
|
||||
These environment variables are only relevant when `LANGFLOW_AUTO_LOGIN` is set to `False`. They specify the username and password for the superuser, which is essential for administrative tasks.
|
||||
|
||||
|
||||
To create a superuser manually:
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
export LANGFLOW_SUPERUSER=admin
|
||||
export LANGFLOW_SUPERUSER_PASSWORD=securepassword
|
||||
```
|
||||
|
||||
|
||||
You can also use the CLI command `langflow superuser` to set up a superuser interactively.
|
||||
|
||||
### _`LANGFLOW_SECRET_KEY`_
|
||||
|
||||
### _`LANGFLOW_SECRET_KEY`_ {#977aea34e6174c58bd76107990d62a1f}
|
||||
|
||||
|
||||
This environment variable holds a secret key used for encrypting the superuser's password. Make sure to set this to a secure, randomly generated string.
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
export LANGFLOW_SECRET_KEY=randomly_generated_secure_key
|
||||
|
||||
```
|
||||
|
||||
### _`LANGFLOW_NEW_USER_IS_ACTIVE`_
|
||||
|
||||
### _`LANGFLOW_NEW_USER_IS_ACTIVE`_ {#c8f5df9283be4e20be51e14518f5272e}
|
||||
|
||||
|
||||
By default, this variable is set to `False`. When enabled (`True`), new users are automatically activated and can log in without requiring explicit activation by the superuser.
|
||||
|
||||
## Manage superusers with the CLI
|
||||
|
||||
## Manage superusers with the CLI {#3b0c36a5cc0f4acc95c884d3de858d46}
|
||||
|
||||
|
||||
Langflow provides a command-line utility for managing superusers:
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
langflow superuser
|
||||
```
|
||||
|
||||
|
||||
This command prompts you to enter the username and password for the superuser, unless they are already set using environment variables.
|
||||
|
||||
## Sign in
|
||||
|
||||
## Sign in {#736ebb8c854b4c268d5e748c119a08ea}
|
||||
|
||||
|
||||
With _`LANGFLOW_AUTO_LOGIN`_ set to _`False`_, Langflow requires users to sign up before they can log in. The sign-up page is the default landing page when a user visits Langflow for the first time.
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/sign-up.png"),
|
||||
dark: useBaseUrl("img/sign-up.png"),
|
||||
}}
|
||||
style={{ width: "40%", margin: "20px auto" }}
|
||||
/>
|
||||
|
||||
## Profile settings
|
||||

|
||||
|
||||
|
||||
## Profile settings {#dd5926e12471448d99bd6849d2149dc8}
|
||||
|
||||
|
||||
Once signed in, you can change your profile settings by clicking on the profile icon in the top right corner of the Langflow dashboard. This opens a dropdown menu with the following options:
|
||||
|
||||
@@ -101,34 +124,17 @@ Once signed in, you can change your profile settings by clicking on the profile
|
||||
- **Profile Settings**: Opens the profile settings page.
|
||||
- **Sign Out**: Logs the user out.
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/my-account.png"),
|
||||
dark: useBaseUrl("img/my-account.png"),
|
||||
}}
|
||||
style={{ maxWidth: "600px", margin: "20px auto" }}
|
||||
/>
|
||||

|
||||
|
||||
|
||||
Select **Profile Settings** to change your password and your profile picture.
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/profile-settings.png"),
|
||||
dark: useBaseUrl("img/profile-settings.png"),
|
||||
}}
|
||||
style={{ maxWidth: "600px", margin: "20px auto" }}
|
||||
/>
|
||||
|
||||

|
||||
|
||||
|
||||
Select **Admin Page** to manage users and groups as the superuser.
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: useBaseUrl("img/admin-page.png"),
|
||||
dark: useBaseUrl("img/admin-page.png"),
|
||||
}}
|
||||
style={{ maxWidth: "600px", margin: "0 auto" }}
|
||||
|
||||
/>
|
||||

|
||||
|
||||
@@ -1,36 +1,41 @@
|
||||
import Admonition from "@theme/Admonition";
|
||||
---
|
||||
title: Backend-Only
|
||||
sidebar_position: 4
|
||||
slug: /configuration-backend-only
|
||||
---
|
||||
|
||||
# Backend-only
|
||||
|
||||
<Admonition type="warning" title="warning">
|
||||
This page may contain outdated information. It will be updated as soon as possible.
|
||||
</Admonition>
|
||||
|
||||
> ⚠️ WARNING
|
||||
> This page may contain outdated information. It will be updated as soon as possible.
|
||||
|
||||
|
||||
You can run Langflow in `--backend-only` mode to expose your Langflow app as an API, without running the frontend UI.
|
||||
|
||||
|
||||
Start langflow in backend-only mode with `python3 -m langflow run --backend-only`.
|
||||
|
||||
|
||||
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
|
||||
Langflow will now serve requests to its API without the frontend running.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Langflow installed](../getting-started/install-langflow)
|
||||
## Prerequisites {#81dfa9407ed648889081b9d08b0e5cfe}
|
||||
|
||||
- [OpenAI API key](https://platform.openai.com)
|
||||
- [Langflow installed](/getting-started-installation)
|
||||
- [OpenAI API key](https://platform.openai.com/)
|
||||
- [A Langflow flow created](/starter-projects-basic-prompting)
|
||||
|
||||
- [A Langflow flow created](../starter-projects/basic-prompting)
|
||||
|
||||
## Download your flow's curl call
|
||||
## Download your flow's curl call {#d2cf1b694e4741eca07fd9806516007b}
|
||||
|
||||
1. Click API.
|
||||
2. Click **curl** > **Copy code** and save the code to your local machine.
|
||||
It will look something like this:
|
||||
2. Click **curl** > **Copy code** and save the code to your local machine.
|
||||
It will look something like this:
|
||||
|
||||
```curl
|
||||
curl -X POST \
|
||||
"http://127.0.0.1:7864/api/v1/run/ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef?stream=false" \
|
||||
-H 'Content-Type: application/json'\
|
||||
```text
|
||||
curl -X POST \\
|
||||
"<http://127.0.0.1:7864/api/v1/run/ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef?stream=false>" \\
|
||||
-H 'Content-Type: application/json'\\
|
||||
-d '{"input_value": "message",
|
||||
"output_type": "chat",
|
||||
"input_type": "chat",
|
||||
@@ -40,39 +45,46 @@ curl -X POST \
|
||||
"ChatOutput-ktwdw": {},
|
||||
"ChatInput-xXC4F": {}
|
||||
}}'
|
||||
|
||||
```
|
||||
|
||||
|
||||
Note the flow ID of `ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef`. You can find this ID in the UI as well to ensure you're querying the right flow.
|
||||
|
||||
## Start Langflow in backend-only mode
|
||||
|
||||
## Start Langflow in backend-only mode {#f0ba018daf3041c39c0d226dadf78d35}
|
||||
|
||||
1. Stop Langflow with Ctrl+C.
|
||||
2. Start langflow in backend-only mode with `python3 -m langflow run --backend-only`.
|
||||
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
|
||||
Langflow will now serve requests to its API.
|
||||
The terminal prints `Welcome to ⛓ Langflow`, and a blank window opens at `http://127.0.0.1:7864/all`.
|
||||
Langflow will now serve requests to its API.
|
||||
3. Run the curl code you copied from the UI.
|
||||
You should get a result like this:
|
||||
You should get a result like this:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
{"session_id":"ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880","outputs":[{"inputs":{"input_value":"hi, are you there?"},"outputs":[{"results":{"result":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?"},"artifacts":{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI"},"messages":[{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI","component_id":"ChatOutput-ktwdw"}],"component_display_name":"Chat Output","component_id":"ChatOutput-ktwdw","used_frozen_result":false}]}]}%
|
||||
|
||||
```
|
||||
|
||||
|
||||
Again, note that the flow ID matches.
|
||||
Langflow is receiving your POST request, running the flow, and returning the result, all without running the frontend. Cool!
|
||||
|
||||
## Download your flow's Python API call
|
||||
|
||||
## Download your flow's Python API call {#5923ff9dc40843c7a22a72fa6c66540c}
|
||||
|
||||
|
||||
Instead of using curl, you can download your flow as a Python API call instead.
|
||||
|
||||
1. Click API.
|
||||
2. Click **Python API** > **Copy code** and save the code to your local machine.
|
||||
The code will look something like this:
|
||||
2. Click **Python API** > **Copy code** and save the code to your local machine.
|
||||
The code will look something like this:
|
||||
|
||||
```python
|
||||
import requests
|
||||
from typing import Optional
|
||||
|
||||
BASE_API_URL = "http://127.0.0.1:7864/api/v1/run"
|
||||
BASE_API_URL = "<http://127.0.0.1:7864/api/v1/run>"
|
||||
FLOW_ID = "ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef"
|
||||
# You can tweak the flow by adding a tweaks dictionary
|
||||
# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}}
|
||||
@@ -83,47 +95,55 @@ def run_flow(message: str,
|
||||
input_type: str = "chat",
|
||||
tweaks: Optional[dict] = None,
|
||||
api_key: Optional[str] = None) -> dict:
|
||||
"""
|
||||
Run a flow with a given message and optional tweaks.
|
||||
"""Run a flow with a given message and optional tweaks.
|
||||
|
||||
:param message: The message to send to the flow
|
||||
:param flow_id: The ID of the flow to run
|
||||
:param tweaks: Optional tweaks to customize the flow
|
||||
:return: The JSON response from the flow
|
||||
"""
|
||||
api_url = f"{BASE_API_URL}/{flow_id}"
|
||||
:param message: The message to send to the flow
|
||||
:param flow_id: The ID of the flow to run
|
||||
:param tweaks: Optional tweaks to customize the flow
|
||||
:return: The JSON response from the flow
|
||||
"""
|
||||
api_url = f"{BASE_API_URL}/{flow_id}"
|
||||
payload = {
|
||||
"input_value": message,
|
||||
"output_type": output_type,
|
||||
"input_type": input_type,
|
||||
}
|
||||
headers = None
|
||||
if tweaks:
|
||||
payload["tweaks"] = tweaks
|
||||
if api_key:
|
||||
headers = {"x-api-key": api_key}
|
||||
response = requests.post(api_url, json=payload, headers=headers)
|
||||
return response.json()
|
||||
|
||||
payload = {
|
||||
"input_value": message,
|
||||
"output_type": output_type,
|
||||
"input_type": input_type,
|
||||
}
|
||||
headers = None
|
||||
if tweaks:
|
||||
payload["tweaks"] = tweaks
|
||||
if api_key:
|
||||
headers = {"x-api-key": api_key}
|
||||
response = requests.post(api_url, json=payload, headers=headers)
|
||||
return response.json()
|
||||
# Setup any tweaks you want to apply to the flow
|
||||
|
||||
# Setup any tweaks you want to apply to the flow
|
||||
message = "message"
|
||||
message = "message"
|
||||
|
||||
print(run_flow(message=message, flow_id=FLOW_ID))
|
||||
|
||||
print(run_flow(message=message, flow_id=FLOW_ID))
|
||||
```
|
||||
|
||||
|
||||
3. Run your Python app:
|
||||
|
||||
```python
|
||||
|
||||
```shell
|
||||
python3 app.py
|
||||
```
|
||||
|
||||
|
||||
The result is similar to the curl call:
|
||||
|
||||
```bash
|
||||
|
||||
```json
|
||||
{'session_id': 'ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880', 'outputs': [{'inputs': {'input_value': 'message'}, 'outputs': [{'results': {'result': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!"}, 'artifacts': {'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI'}, 'messages': [{'message': "Arrr matey! What be yer message for this ol' pirate? Speak up or walk the plank!", 'sender': 'Machine', 'sender_name': 'AI', 'component_id': 'ChatOutput-ktwdw'}], 'component_display_name': 'Chat Output', 'component_id': 'ChatOutput-ktwdw', 'used_frozen_result': False}]}]}
|
||||
|
||||
```
|
||||
|
||||
|
||||
Your Python app POSTs to your Langflow server, and the server runs the flow and returns the result.
|
||||
|
||||
See [API](../administration/api) for more ways to interact with your headless Langflow server.
|
||||
|
||||
See [API](https://www.notion.so/administration/api) for more ways to interact with your headless Langflow server.
|
||||
|
||||
@@ -1,34 +1,44 @@
|
||||
import Admonition from "@theme/Admonition";
|
||||
---
|
||||
title: Command Line Interface (CLI)
|
||||
sidebar_position: 2
|
||||
slug: /configuration-cli
|
||||
---
|
||||
|
||||
# Command Line Interface (CLI)
|
||||
|
||||
<Admonition type="warning" title="warning">
|
||||
This page may contain outdated information. It will be updated as soon as possible.
|
||||
</Admonition>
|
||||
|
||||
> ⚠️ WARNING
|
||||
> This page may contain outdated information. It will be updated as soon as possible.
|
||||
>
|
||||
|
||||
|
||||
Langflow's Command Line Interface (CLI) is a powerful tool that allows you to interact with the Langflow server from the command line. The CLI provides a wide range of commands to help you shape Langflow to your needs.
|
||||
|
||||
|
||||
The available commands are below. Navigate to their individual sections of this page to see the parameters.
|
||||
|
||||
- [langflow](#overview)
|
||||
- [langflow api-key](#langflow-api-key)
|
||||
- [langflow copy-db](#langflow-copy-db)
|
||||
- [langflow migration](#langflow-migration)
|
||||
- [langflow run](#langflow-run)
|
||||
- [langflow superuser](#langflow-superuser)
|
||||
- [langflow](/configuration-cli)
|
||||
- [langflow api-key](/configuration-cli)
|
||||
- [langflow copy-db](/configuration-cli)
|
||||
- [langflow migration](/configuration-cli)
|
||||
- [langflow run](/configuration-cli)
|
||||
- [langflow superuser](/configuration-cli)
|
||||
|
||||
## Overview {#c50e5530289349cf8ed7bee22ba2211a}
|
||||
|
||||
## Overview
|
||||
|
||||
Running the CLI without any arguments displays a list of available options and commands.
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
langflow
|
||||
# or
|
||||
langflow --help
|
||||
# or
|
||||
python -m langflow
|
||||
|
||||
```
|
||||
|
||||
|
||||
| Command | Description |
|
||||
| ----------- | ---------------------------------------------------------------------- |
|
||||
| `api-key` | Creates an API key for the default superuser if AUTO_LOGIN is enabled. |
|
||||
@@ -37,7 +47,9 @@ python -m langflow
|
||||
| `run` | Run the Langflow. |
|
||||
| `superuser` | Create a superuser. |
|
||||
|
||||
### Options
|
||||
|
||||
### Options {#8a3b5b7ed55b4774ad6d533bb337ef47}
|
||||
|
||||
|
||||
| Option | Description |
|
||||
| ---------------------- | -------------------------------------------------------------------------------- |
|
||||
@@ -45,11 +57,14 @@ python -m langflow
|
||||
| `--show-completion` | Show completion for the current shell, to copy it or customize the installation. |
|
||||
| `--help` | Show this message and exit. |
|
||||
|
||||
## langflow api-key
|
||||
|
||||
## langflow api-key {#dbfc8c4c83474b83a38bdc7471bccf41}
|
||||
|
||||
|
||||
Run the `api-key` command to create an API key for the default superuser if `LANGFLOW_AUTO_LOGIN` is set to `True`.
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
langflow api-key
|
||||
# or
|
||||
python -m langflow api-key
|
||||
@@ -63,36 +78,50 @@ python -m langflow api-key
|
||||
│ │
|
||||
│ The API key has been copied to your clipboard. Cmd + V to paste it. │
|
||||
╰──────────────────────────────
|
||||
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
### Options {#ec2ef993dc984811b25838c8d8230b31}
|
||||
|
||||
|
||||
| Option | Type | Description |
|
||||
| ----------- | ---- | ------------------------------------------------------------- |
|
||||
| --log-level | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
|
||||
| --help | | Show this message and exit. |
|
||||
|
||||
## langflow copy-db
|
||||
|
||||
## langflow copy-db {#729a13f4847545e5973d8f9c20f8833d}
|
||||
|
||||
|
||||
Run the `copy-db` command to copy the cached `langflow.db` and `langflow-pre.db` database files to the current directory.
|
||||
|
||||
|
||||
If the files exist in the cache directory, they will be copied to the same directory as `__main__.py`, which can be found with `which langflow`.
|
||||
|
||||
### Options
|
||||
|
||||
### Options {#7b7e6bd02b3243218e1d666711854673}
|
||||
|
||||
|
||||
None.
|
||||
|
||||
## langflow migration
|
||||
|
||||
Run or test migrations with the [Alembic](https://pypi.org/project/alembic/) database tool.
|
||||
## langflow migration {#7027c1925a444119a7a8ea2bff4bd16d}
|
||||
|
||||
```bash
|
||||
|
||||
Run or test migrations with the Alembic database tool.
|
||||
|
||||
|
||||
```shell
|
||||
langflow migration
|
||||
# or
|
||||
python -m langflow migration
|
||||
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
### Options {#0b38fbe97bb34edeb7740a7db58433e9}
|
||||
|
||||
|
||||
| Option | Description |
|
||||
| ------------------- | -------------------------------------------------------------------------------------------------------------------------- |
|
||||
@@ -100,56 +129,71 @@ python -m langflow migration
|
||||
| `--fix, --no-fix` | Fix migrations. This is a destructive operation, and should only be used if you know what you are doing. [default: no-fix] |
|
||||
| `--help` | Show this message and exit. |
|
||||
|
||||
## langflow run
|
||||
|
||||
## langflow run {#fe050aa659cb4d33a560b859d54c94ea}
|
||||
|
||||
|
||||
Run Langflow.
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
langflow run
|
||||
# or
|
||||
python -m langflow run
|
||||
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
| ---------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--help` | Displays all available options. |
|
||||
| `--host` | Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. |
|
||||
| `--workers` | Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. |
|
||||
| `--timeout` | Sets the worker timeout in seconds. The default is `60`. |
|
||||
| `--port` | Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. |
|
||||
| `--env-file` | Specifies the path to the .env file containing environment variables. The default is `.env`. |
|
||||
| `--log-level` | Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. |
|
||||
| `--components-path` | Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. |
|
||||
| `--log-file` | Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. |
|
||||
| `--cache` | Select the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. |
|
||||
| `--dev`/`--no-dev` | Toggles the development mode. The default is `no-dev`. |
|
||||
| `--path` | Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. |
|
||||
| `--open-browser`/`--no-open-browser` | Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. |
|
||||
| `--remove-api-keys`/`--no-remove-api-keys` | Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. |
|
||||
| `--install-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Installs completion for the specified shell. |
|
||||
| `--show-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Shows completion for the specified shell, allowing you to copy it or customize the installation. |
|
||||
| `--backend-only` | This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. For more, see [Backend-only](../deployment/backend-only). |
|
||||
| `--store` | This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable. |
|
||||
### Options {#4e811481ec9142f1b60309bb1ce5a2ce}
|
||||
|
||||
|
||||
| Option | Description |
|
||||
| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--help` | Displays all available options. |
|
||||
| `--host` | Defines the host to bind the server to. Can be set using the `LANGFLOW_HOST` environment variable. The default is `127.0.0.1`. |
|
||||
| `--workers` | Sets the number of worker processes. Can be set using the `LANGFLOW_WORKERS` environment variable. The default is `1`. |
|
||||
| `--timeout` | Sets the worker timeout in seconds. The default is `60`. |
|
||||
| `--port` | Sets the port to listen on. Can be set using the `LANGFLOW_PORT` environment variable. The default is `7860`. |
|
||||
| `--env-file` | Specifies the path to the .env file containing environment variables. The default is `.env`. |
|
||||
| `--log-level` | Defines the logging level. Can be set using the `LANGFLOW_LOG_LEVEL` environment variable. The default is `critical`. |
|
||||
| `--components-path` | Specifies the path to the directory containing custom components. Can be set using the `LANGFLOW_COMPONENTS_PATH` environment variable. The default is `langflow/components`. |
|
||||
| `--log-file` | Specifies the path to the log file. Can be set using the `LANGFLOW_LOG_FILE` environment variable. The default is `logs/langflow.log`. |
|
||||
| `--cache` | Select the type of cache to use. Options are `InMemoryCache` and `SQLiteCache`. Can be set using the `LANGFLOW_LANGCHAIN_CACHE` environment variable. The default is `SQLiteCache`. |
|
||||
| `--dev`/`--no-dev` | Toggles the development mode. The default is `no-dev`. |
|
||||
| `--path` | Specifies the path to the frontend directory containing build files. This option is for development purposes only. Can be set using the `LANGFLOW_FRONTEND_PATH` environment variable. |
|
||||
| `--open-browser`/`--no-open-browser` | Toggles the option to open the browser after starting the server. Can be set using the `LANGFLOW_OPEN_BROWSER` environment variable. The default is `open-browser`. |
|
||||
| `--remove-api-keys`/`--no-remove-api-keys` | Toggles the option to remove API keys from the projects saved in the database. Can be set using the `LANGFLOW_REMOVE_API_KEYS` environment variable. The default is `no-remove-api-keys`. |
|
||||
| `--install-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Installs completion for the specified shell. |
|
||||
| `--show-completion [bash\|zsh\|fish\|powershell\|pwsh]` | Shows completion for the specified shell, allowing you to copy it or customize the installation. |
|
||||
| `--backend-only` | This parameter, with a default value of `False`, allows running only the backend server without the frontend. It can also be set using the `LANGFLOW_BACKEND_ONLY` environment variable. |
|
||||
| `--store` | This parameter, with a default value of `True`, enables the store features, use `--no-store` to deactivate it. It can be configured using the `LANGFLOW_STORE` environment variable. |
|
||||
|
||||
|
||||
### CLI environment variables {#5868aaccfcc74e26968538ef4d07e756}
|
||||
|
||||
#### CLI environment variables
|
||||
|
||||
You can configure many of the CLI options using environment variables. These can be exported in your operating system or added to a `.env` file and loaded using the `--env-file` option.
|
||||
|
||||
|
||||
A sample `.env` file named `.env.example` is included with the project. Copy this file to a new file named `.env` and replace the example values with your actual settings. If you're setting values in both your OS and the `.env` file, the `.env` settings will take precedence.
|
||||
|
||||
## langflow superuser
|
||||
|
||||
## langflow superuser {#5944233ce0c942878e928e1f2945d717}
|
||||
|
||||
|
||||
Create a superuser for Langflow.
|
||||
|
||||
```bash
|
||||
|
||||
```shell
|
||||
langflow superuser
|
||||
# or
|
||||
python -m langflow superuser
|
||||
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
### Options {#f333c5635ead4c3d95985467bb08cc8f}
|
||||
|
||||
|
||||
| Option | Type | Description |
|
||||
| ------------- | ---- | ------------------------------------------------------------- |
|
||||
@@ -157,3 +201,4 @@ python -m langflow superuser
|
||||
| `--password` | TEXT | Password for the superuser. [default: None] [required] |
|
||||
| `--log-level` | TEXT | Logging level. [env var: LANGFLOW_LOG_LEVEL] [default: error] |
|
||||
| `--help` | | Show this message and exit. |
|
||||
|
||||
BIN
docs/docs/Contributing/683296796.png
Normal file
|
After Width: | Height: | Size: 111 KiB |
1
docs/docs/Contributing/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":10, "label":"Contributing"}
|
||||
55
docs/docs/Contributing/contributing-community.md
Normal file
@@ -0,0 +1,55 @@
|
||||
---
|
||||
title: Community
|
||||
sidebar_position: 3
|
||||
slug: /contributing-community
|
||||
---
|
||||
|
||||
|
||||
|
||||
## 🤖 Join **Langflow** Discord server {#80011e0bda004e83a8012c7ec6eab29a}
|
||||
|
||||
|
||||
Join us to ask questions and showcase your projects.
|
||||
|
||||
|
||||
Let's bring together the building blocks of AI integration!
|
||||
|
||||
|
||||
Langflow [Discord](https://discord.gg/EqksyE2EX9) server.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## 🐦 Stay tuned for **Langflow** on Twitter {#6a17ba5905ad4f7aa5347af7854779f6}
|
||||
|
||||
|
||||
Follow [@langflow_ai](https://twitter.com/langflow_ai) on **Twitter** to get the latest news about **Langflow**.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## ⭐️ Star **Langflow** on GitHub {#c903a569934643799bf52b7d1b3514e1}
|
||||
|
||||
|
||||
You can "star" **Langflow** in [GitHub](https://github.com/langflow-ai/langflow).
|
||||
|
||||
|
||||
By adding a star, other users will be able to find it more easily and see that it has been already useful for others.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## 👀 Watch the GitHub repository for releases {#d0a089ed717742308bd17430e5ae6309}
|
||||
|
||||
|
||||
You can "watch" **Langflow** in [GitHub](https://github.com/langflow-ai/langflow). If you select "Watching" instead of "Releases only" you will receive notifications when someone creates a new issue or question. You can also specify that you only want to be notified about new issues, discussions, PRs, etc. so you can try and help them solve those questions.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Thanks! 🚀
|
||||
|
||||
21
docs/docs/Contributing/contributing-github-issues.md
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
title: GitHub Issues
|
||||
sidebar_position: 2
|
||||
slug: /contributing-github-issues
|
||||
---
|
||||
|
||||
|
||||
|
||||
Our [issues](https://github.com/langflow-ai/langflow/issues) page is kept up to date with bugs, improvements, and feature requests. There is a taxonomy of labels to help with sorting and discovery of issues of interest.
|
||||
|
||||
|
||||
If you're looking for help with your code, consider posting a question on the [GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). Please understand that we won't be able to provide individual support via email. We also believe that help is much more valuable if it's **shared publicly**, so that more people can benefit from it.
|
||||
|
||||
- **Describing your issue:** Try to provide as many details as possible. What exactly goes wrong? _How_ is it failing? Is there an error? "XY doesn't work" usually isn't that helpful for tracking down problems. Always remember to include the code you ran and if possible, extract only the relevant parts and don't just dump your entire script. This will make it easier for us to reproduce the error.
|
||||
- **Sharing long blocks of code or logs:** If you need to include long code, logs or tracebacks, you can wrap them in `<details>` and `</details>`. This [collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details) so it only becomes visible on click, making the issue easier to read and follow.
|
||||
|
||||
## Issue labels {#e19eae656c914ce7aedc4f55565cc0bc}
|
||||
|
||||
|
||||
[See this page](https://github.com/langflow-ai/langflow/labels) for an overview of the system we use to tag our issues and pull requests.
|
||||
|
||||
141
docs/docs/Contributing/contributing-how-to-contribute.md
Normal file
@@ -0,0 +1,141 @@
|
||||
---
|
||||
title: How to contribute?
|
||||
sidebar_position: 1
|
||||
slug: /contributing-how-to-contribute
|
||||
---
|
||||
|
||||
|
||||
|
||||
👋 Hello there! We welcome contributions from developers of all levels to our open-source project on [GitHub](https://github.com/langflow-ai/langflow). If you'd like to contribute, please check our contributing guidelines and help make Langflow more accessible.
|
||||
|
||||
|
||||
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether in the form of a new feature, improved infra, or better documentation.
|
||||
|
||||
|
||||
To contribute to this project, please follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
|
||||
|
||||
|
||||
Please do not try to push directly to this repo unless you are a maintainer.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Local development {#0388cc3c758d434d994022863a6bafa9}
|
||||
|
||||
|
||||
You can develop Langflow using docker compose, or locally.
|
||||
|
||||
|
||||
We provide a `.vscode/launch.json` file for debugging the backend in VSCode, which is a lot faster than using docker compose.
|
||||
|
||||
|
||||
Setting up hooks:
|
||||
|
||||
|
||||
`make init`
|
||||
|
||||
|
||||
This will install the pre-commit hooks, which will run `make format` on every commit.
|
||||
|
||||
|
||||
It is advised to run `make lint` before pushing to the repository.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Run locally {#5225c2ef0cd6403c9f6c6bbd888115e0}
|
||||
|
||||
|
||||
Langflow can run locally by cloning the repository and installing the dependencies. We recommend using a virtual environment to isolate the dependencies from your system.
|
||||
|
||||
|
||||
Before you start, make sure you have the following installed:
|
||||
|
||||
- Poetry (>=1.4)
|
||||
- Node.js
|
||||
|
||||
Then, in the root folder, install the dependencies and start the development server for the backend:
|
||||
|
||||
|
||||
`make backend`
|
||||
|
||||
|
||||
And the frontend:
|
||||
|
||||
|
||||
`make frontend`
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Docker compose {#b07f359414ff4220ac615afc364ee46e}
|
||||
|
||||
|
||||
The following snippet will run the backend and frontend in separate containers. The frontend will be available at `localhost:3000` and the backend at `localhost:7860`.
|
||||
|
||||
|
||||
`docker compose up --build` or `make dev build=1`
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Documentation {#5f34bcaeccdc4489b0c5ee2c4a21354e}
|
||||
|
||||
|
||||
The documentation is built using [Docusaurus](https://docusaurus.io/). To run the documentation locally, run the following commands:
|
||||
|
||||
|
||||
`cd docs && npm install && npm run start`
|
||||
|
||||
|
||||
The documentation will be available at `localhost:3000` and all the files are located in the `docs/docs` folder. Once you are done with your changes, you can create a Pull Request to the `main` branch.
|
||||
|
||||
|
||||
## Submitting Components {#9676353bc4504551a4014dd572ac8be8}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
New components are added as objects of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/interface/custom/custom_component/custom_component.py) class and any dependencies are added to the [pyproject.toml](https://github.com/langflow-ai/langflow/blob/dev/pyproject.toml#L27) file.
|
||||
|
||||
|
||||
## Add an example component {#8caae106c853465d83183e7f5272e4d8}
|
||||
|
||||
|
||||
You have a new document loader called **MyCustomDocumentLoader** and it would look awesome in Langflow.
|
||||
|
||||
1. Write your loader as an object of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/interface/custom/custom_component/custom_component.py) class. You'll create a new class, `MyCustomDocumentLoader`, that will inherit from `CustomComponent` and override the base class's methods.
|
||||
2. Define optional attributes like `display_name`, `description`, and `documentation` to provide information about your custom component.
|
||||
3. Implement the `build_config` method to define the configuration options for your custom component.
|
||||
4. Implement the `build` method to define the logic for taking input parameters specified in the `build_config` method and returning the desired output.
|
||||
5. Add the code to the [/components/documentloaders](https://github.com/langflow-ai/langflow/tree/dev/src/backend/base/langflow/components) folder.
|
||||
6. Add the dependency to [/documentloaders/__init__.py](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/components/documentloaders/__init__.py) as `from .MyCustomDocumentLoader import MyCustomDocumentLoader`.
|
||||
7. Add any new dependencies to the outer [pyproject.toml](https://github.com/langflow-ai/langflow/blob/dev/pyproject.toml#L27) file.
|
||||
8. Submit documentation for your component. For this example, you'd submit documentation to the [loaders page](https://github.com/langflow-ai/langflow/blob/dev/docs/docs/components/loaders).
|
||||
9. Submit your changes as a pull request. The Langflow team will have a look, suggest changes, and add your component to Langflow.
|
||||
|
||||
## User Sharing {#34ac32e11f344eab892b94531a21d2c9}
|
||||
|
||||
|
||||
You might want to share and test your custom component with others, but don't need it merged into the main source code.
|
||||
|
||||
|
||||
If so, you can share your component on the Langflow store.
|
||||
|
||||
1. [Register at the Langflow store](https://www.langflow.store/login/).
|
||||
2. Undergo pre-validation before receiving an API key.
|
||||
3. To deploy your amazing component directly to the Langflow store, without it being merged into the main source code, navigate to your flow, and then click **Share**. The share window appears:
|
||||
|
||||

|
||||
|
||||
|
||||
4. Choose whether you want your flow to be public or private. You can also **Export** your flow as a JSON file from this window. When you're ready to share the flow, click **Share Flow**. You should see a **Flow shared successfully** popup.
|
||||
|
||||
|
||||
5. To confirm, navigate to the **Langflow Store** and filter results by **Created By Me**. You should see your new flow on the **Langflow Store**.
|
||||
|
||||
@@ -1,30 +1,38 @@
|
||||
import Admonition from "@theme/Admonition";
|
||||
---
|
||||
title: Telemetry
|
||||
sidebar_position: 0
|
||||
slug: /contributing-telemetry
|
||||
---
|
||||
|
||||
|
||||
# Telemetry
|
||||
|
||||
Our system uses anonymous telemetry to collect essential usage statistics to enhance functionality and user experience. This data helps us identify commonly used features and areas needing improvement, ensuring our development efforts align with what you need.
|
||||
|
||||
<Admonition type="info">
|
||||
|
||||
INFO
|
||||
|
||||
|
||||
We respect your privacy and are committed to protecting your data. We do not collect any personal information or sensitive data. All telemetry data is anonymized and used solely for improving Langflow.
|
||||
|
||||
You can opt-out of telemetry by setting the `LANGFLOW_DO_NOT_TRACK` or `DO_NOT_TRACK` environment variable to `true` before running Langflow. This will disable telemetry data collection.
|
||||
|
||||
</Admonition>
|
||||
You can opt-out of telemetry by setting the `LANGFLOW_DO_NOT_TRACK` or `DO_NOT_TRACK` environment variable to `true` before running Langflow. This will disable telemetry data collection.
|
||||
|
||||
## Data Collected Includes:
|
||||
|
||||
### Run
|
||||
## Data Collected Includes: {#1734ed50fb4a4a45aaa84185b44527ca}
|
||||
|
||||
|
||||
### Run {#2d427dca4f0148ae867997f6789e8bfb}
|
||||
|
||||
- **IsWebhook**: Indicates whether the operation was triggered via a webhook.
|
||||
- **Seconds**: Duration in seconds for how long the operation lasted, providing insights into performance.
|
||||
- **Success**: Boolean value indicating whether the operation was successful, helping identify potential errors or issues.
|
||||
- **ErrorMessage**: Provides error message details if the operation was unsuccessful, aiding in troubleshooting and enhancements.
|
||||
|
||||
### Shutdown
|
||||
### Shutdown {#081e4bd4faec430fb05b657026d1a69c}
|
||||
|
||||
- **Time Running**: Total runtime before shutdown, useful for understanding application lifecycle and optimizing uptime.
|
||||
|
||||
### Version
|
||||
### Version {#dc09f6aba6c64c7b8dad3d86a7cba6d6}
|
||||
|
||||
- **Version**: The specific version of Langflow used, which helps in tracking feature adoption and compatibility.
|
||||
- **Platform**: Operating system of the host machine, which aids in focusing our support for popular platforms like Windows, macOS, and Linux.
|
||||
@@ -34,18 +42,18 @@ You can opt-out of telemetry by setting the `LANGFLOW_DO_NOT_TRACK` or `DO_NOT_T
|
||||
- **CacheType**: Type of caching mechanism used, which impacts performance and efficiency.
|
||||
- **BackendOnly**: Boolean indicating whether you are running Langflow in a backend-only mode, useful for understanding deployment configurations.
|
||||
|
||||
### Playground
|
||||
### Playground {#ae6c3859f612441db3c15a7155e9f920}
|
||||
|
||||
- **Seconds**: Duration in seconds for playground execution, offering insights into performance during testing or experimental stages.
|
||||
- **ComponentCount**: Number of components used in the playground, which helps understand complexity and usage patterns.
|
||||
- **Success**: Success status of the playground operation, aiding in identifying the stability of experimental features.
|
||||
|
||||
### Component
|
||||
### Component {#630728d6654c40a6b8901459a4bc3a4e}
|
||||
|
||||
- **Name**: Identifies the component, providing data on which components are most utilized or prone to issues.
|
||||
- **Seconds**: Time taken by the component to execute, offering performance metrics.
|
||||
- **Success**: Whether the component operated successfully, which helps in quality control.
|
||||
- **ErrorMessage**: Details of any errors encountered, crucial for debugging and improvement.
|
||||
|
||||
This telemetry data is crucial for enhancing Langflow and ensuring that our development efforts align with your needs.
|
||||
Your feedback and suggestions are invaluable in shaping the future of Langflow, and we appreciate your support in making Langflow better for everyone.
|
||||
This telemetry data is crucial for enhancing Langflow and ensuring that our development efforts align with your needs. Your feedback and suggestions are invaluable in shaping the future of Langflow, and we appreciate your support in making Langflow better for everyone.
|
||||
|
||||
BIN
docs/docs/Deployment/1098199232.png
Normal file
|
After Width: | Height: | Size: 764 KiB |
BIN
docs/docs/Deployment/1861599636.png
Normal file
|
After Width: | Height: | Size: 414 KiB |
BIN
docs/docs/Deployment/1929176153.png
Normal file
|
After Width: | Height: | Size: 130 KiB |
BIN
docs/docs/Deployment/257215214.png
Normal file
|
After Width: | Height: | Size: 686 KiB |
1
docs/docs/Deployment/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":6, "label":"Deployment"}
|
||||
76
docs/docs/Deployment/deployment-docker.md
Normal file
@@ -0,0 +1,76 @@
|
||||
---
|
||||
title: Docker
|
||||
sidebar_position: 2
|
||||
slug: /deployment-docker
|
||||
---
|
||||
|
||||
|
||||
|
||||
This guide will help you get LangFlow up and running using Docker and Docker Compose.
|
||||
|
||||
|
||||
## Prerequisites {#856bb2d98156402bbd1980365b98110c}
|
||||
|
||||
- Docker
|
||||
- Docker Compose
|
||||
|
||||
## Clone repo and build Docker container {#ba89773aa8b8425b985bfe7ba91c35cc}
|
||||
|
||||
1. Clone the LangFlow repository:
|
||||
|
||||
`git clone https://github.com/langflow-ai/langflow.git`
|
||||
|
||||
2. Navigate to the `docker_example` directory:
|
||||
|
||||
`cd langflow/docker_example`
|
||||
|
||||
3. Run the Docker Compose file:
|
||||
|
||||
`docker compose up`
|
||||
|
||||
|
||||
LangFlow will now be accessible at [http://localhost:7860/](http://localhost:7860/).
|
||||
|
||||
|
||||
## Docker Compose configuration {#02226209cad24185a6ec5b69bd820d0f}
|
||||
|
||||
|
||||
The Docker Compose configuration spins up two services: `langflow` and `postgres`.
|
||||
|
||||
|
||||
### LangFlow service {#d749848451ea43bd86f6f096dc77e6e6}
|
||||
|
||||
|
||||
The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.
|
||||
|
||||
|
||||
Environment variables:
|
||||
|
||||
- `LANGFLOW_DATABASE_URL`: The connection string for the PostgreSQL database.
|
||||
- `LANGFLOW_CONFIG_DIR`: The directory where LangFlow stores logs, file storage, monitor data, and secret keys.
|
||||
|
||||
Volumes:
|
||||
|
||||
- `langflow-data`: This volume is mapped to `/var/lib/langflow` in the container.
|
||||
|
||||
### PostgreSQL service {#121140decbfe4997b12213bdd2c4da7e}
|
||||
|
||||
|
||||
The `postgres` service uses the `postgres:16` Docker image and exposes port 5432.
|
||||
|
||||
|
||||
Environment variables:
|
||||
|
||||
- `POSTGRES_USER`: The username for the PostgreSQL database.
|
||||
- `POSTGRES_PASSWORD`: The password for the PostgreSQL database.
|
||||
- `POSTGRES_DB`: The name of the PostgreSQL database.
|
||||
|
||||
Volumes:
|
||||
|
||||
- `langflow-postgres`: This volume is mapped to `/var/lib/postgresql/data` in the container.
|
||||
|
||||
## Switch to a specific LangFlow version {#2b3e191ea48f4feab89242433cf012d5}
|
||||
|
||||
|
||||
If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`.
|
||||
|
||||
@@ -1,39 +1,56 @@
|
||||
import Admonition from "@theme/Admonition";
|
||||
---
|
||||
title: GCP
|
||||
sidebar_position: 3
|
||||
slug: /deployment-gcp
|
||||
---
|
||||
|
||||
# Deploy on Google Cloud Platform
|
||||
|
||||
<Admonition type="warning" title="warning">
|
||||
This page may contain outdated information. It will be updated as soon as possible.
|
||||
</Admonition>
|
||||
|
||||
## Run Langflow from a New Google Cloud Project
|
||||
# Deploy on Google Cloud Platform {#4ee01cda736c4f7396936409f23cdb52}
|
||||
|
||||
|
||||
> Warning: This page may contain outdated information. It will be updated as soon as possible.
|
||||
|
||||
|
||||
## Run Langflow from a New Google Cloud Project {#ce729796d7404ccdb627bee47d6a4399}
|
||||
|
||||
|
||||
This guide will help you set up a Langflow development VM in a Google Cloud Platform project using Google Cloud Shell.
|
||||
|
||||
> Note: When Cloud Shell opens, be sure to select **Trust repo**. Some `gcloud` commands might not run in an ephemeral Cloud Shell environment.
|
||||
|
||||
## Standard VM
|
||||
> Note: When Cloud Shell opens, be sure to select Trust repo. Some gcloud commands might not run in an ephemeral Cloud Shell environment.
|
||||
|
||||
|
||||
## Standard VM {#245b47b450dd4159a5c56a5124bab84f}
|
||||
|
||||
|
||||
[embed](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial.md)
|
||||
|
||||
[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial.md)
|
||||
|
||||
This script sets up a Debian-based VM with the Langflow package, Nginx, and the necessary configurations to run the Langflow Dev environment.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Spot/Preemptible Instance
|
||||
|
||||
[](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/genome21/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial_spot.md)
|
||||
## Spot/Preemptible Instance {#de9b8f7c71284cbb98e8137a3c44553d}
|
||||
|
||||
|
||||
When running as a [spot (preemptible) instance](https://cloud.google.com/compute/docs/instances/preemptible), the code and VM will behave the same way as in a regular instance, executing the startup script to configure the environment, install necessary dependencies, and run the Langflow application. However, **due to the nature of spot instances, the VM may be terminated at any time if Google Cloud needs to reclaim the resources**. This makes spot instances suitable for fault-tolerant, stateless, or interruptible workloads that can handle unexpected terminations and restarts.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Pricing (approximate)
|
||||
|
||||
> For a more accurate breakdown of costs, please use the [**GCP Pricing Calculator**](https://cloud.google.com/products/calculator)
|
||||
## Pricing (approximate) {#2289f4ba9f544e6e9d4b915ef5aacd24}
|
||||
|
||||
|
||||
> For a more accurate breakdown of costs, please use the GCP Pricing Calculator
|
||||
|
||||
|
||||
| Component | Regular Cost (Hourly) | Regular Cost (Monthly) | Spot/Preemptible Cost (Hourly) | Spot/Preemptible Cost (Monthly) | Notes |
|
||||
| ------------------ | --------------------- | ---------------------- | ------------------------------ | ------------------------------- | -------------------------------------------------------------------------- |
|
||||
| 100 GB Disk | - | $10/month | - | $10/month | Disk cost remains the same for both regular and Spot/Preemptible VMs |
|
||||
| VM (n1-standard-4) | $0.15/hr | ~$108/month | ~$0.04/hr | ~$29/month | The VM cost can be significantly reduced using a Spot/Preemptible instance |
|
||||
| **Total** | **$0.15/hr** | **~$118/month** | **~$0.04/hr** | **~$39/month** | Total costs for running the VM and disk 24/7 for an entire month |
|
||||
|
||||
26
docs/docs/Deployment/deployment-hugging-face-spaces.md
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
title: 🤗 Spaces
|
||||
sidebar_position: 0
|
||||
slug: /deployment-hugging-face-spaces
|
||||
---
|
||||
|
||||
|
||||
|
||||
# HuggingFace Spaces {#00f5b3a6818d496dbb18e1a6a910e57d}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
HuggingFace provides a great alternative for running Langflow in their Spaces environment. This means you can run Langflow in the cloud without any local installation required. Here's how you can get Langflow up and running on HuggingFace Spaces:
|
||||
|
||||
1. **Access Langflow Space**: Open a Chromium-based browser and navigate to the [Langflow Space](https://huggingface.co/spaces/Langflow/Langflow?duplicate=true). This link directs you to a pre-configured environment for Langflow.
|
||||
2. **Duplicate the Space**: You'll encounter an option to duplicate the Langflow space. This step involves a few simple decisions:
|
||||
- **Naming Your Space**: Assign a unique name to your new Space.
|
||||
- **Visibility Settings**: Choose between Public or Private visibility for your Space.
|
||||
- After setting these parameters, click on **Duplicate Space** to initiate the setup.
|
||||
|
||||

|
||||
|
||||
3. **Complete Installation**: The duplication and setup process begins immediately after you click **Duplicate Space**. Once completed, you will be automatically redirected to the main page of your new Space.
|
||||
4. **Start Exploring Langflow**: With the setup complete, Langflow is now ready for use in your Space and you can start exploring its features and capabilities right away!
|
||||
361
docs/docs/Deployment/deployment-kubernetes.md
Normal file
@@ -0,0 +1,361 @@
|
||||
---
|
||||
title: Kubernetes
|
||||
sidebar_position: 1
|
||||
slug: /deployment-kubernetes
|
||||
---
|
||||
|
||||
|
||||
|
||||
This guide will help you get LangFlow up and running in a Kubernetes cluster, including the following steps:
|
||||
|
||||
- Install [LangFlow as IDE](/deployment-kubernetes) in a Kubernetes cluster (for development)
|
||||
- Install [LangFlow as a standalone application](/deployment-kubernetes) in a Kubernetes cluster (for production runtime workloads)
|
||||
|
||||
## LangFlow (IDE) {#cb60b2f34e70490faf231cb0fe1a4b42}
|
||||
|
||||
|
||||
This solution is designed to provide a complete environment for developers to create, test, and debug their flows. It includes both the API and the UI.
|
||||
|
||||
|
||||
### Prerequisites {#3efd3c63ff8849228c136f9252e504fd}
|
||||
|
||||
- Kubernetes server
|
||||
- kubectl
|
||||
- Helm
|
||||
|
||||
### Step 0. Prepare a Kubernetes cluster {#290b9624770a4c1ba2c889d384b7ef4c}
|
||||
|
||||
|
||||
We use [Minikube](https://minikube.sigs.k8s.io/docs/start/) for this example, but you can use any Kubernetes cluster.
|
||||
|
||||
1. Create a Kubernetes cluster on Minikube.
|
||||
|
||||
```text
|
||||
minikube start
|
||||
```
|
||||
|
||||
2. Set `kubectl` to use Minikube.
|
||||
|
||||
```text
|
||||
kubectl config use-context minikube
|
||||
```
|
||||
|
||||
|
||||
### Step 1. Install the LangFlow Helm chart {#b5c2a35144634a05a392f7e650929efe}
|
||||
|
||||
1. Add the repository to Helm.
|
||||
|
||||
```text
|
||||
helm repo add langflow https://langflow-ai.github.io/langflow-helm-charts
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. Install LangFlow with the default options in the `langflow` namespace.
|
||||
|
||||
```text
|
||||
helm install langflow-ide langflow/langflow-ide -n langflow --create-namespace
|
||||
```
|
||||
|
||||
3. Check the status of the pods
|
||||
|
||||
```text
|
||||
kubectl get pods -n langflow
|
||||
```
|
||||
|
||||
|
||||
```text
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
langflow-0 1/1 Running 0 33s
|
||||
langflow-frontend-5d9c558dbb-g7tc9 1/1 Running 0 38s
|
||||
```
|
||||
|
||||
|
||||
### Step 2. Access LangFlow {#34c71d04351949deb6c8ed7ffe30eafb}
|
||||
|
||||
|
||||
Enable local port forwarding to access LangFlow from your local machine.
|
||||
|
||||
|
||||
```text
|
||||
kubectl port-forward -n langflow svc/langflow-langflow-runtime 7860:7860
|
||||
```
|
||||
|
||||
|
||||
Now you can access LangFlow at [http://localhost:7860/](http://localhost:7860/).
|
||||
|
||||
|
||||
### LangFlow version {#645c6ef7984d4da0bcc4170bab0ff415}
|
||||
|
||||
|
||||
To specify a different LangFlow version, you can set the `langflow.backend.image.tag` and `langflow.frontend.image.tag` values in the `values.yaml` file.
|
||||
|
||||
|
||||
```yaml
|
||||
langflow:
|
||||
backend:
|
||||
image:
|
||||
tag: "1.0.0a59"
|
||||
frontend:
|
||||
image:
|
||||
tag: "1.0.0a59"
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Storage {#6772c00af79147d293c821b4c6905d3b}
|
||||
|
||||
|
||||
By default, the chart will use a SQLite database stored in a local persistent disk.
|
||||
If you want to use an external PostgreSQL database, you can set the `langflow.database` values in the `values.yaml` file.
|
||||
|
||||
|
||||
```yaml
|
||||
# Deploy postgresql. You can skip this section if you have an existing postgresql database.
|
||||
postgresql:
|
||||
enabled: true
|
||||
fullnameOverride: "langflow-ide-postgresql-service"
|
||||
auth:
|
||||
username: "langflow"
|
||||
password: "langflow-postgres"
|
||||
database: "langflow-db"
|
||||
|
||||
langflow:
|
||||
backend:
|
||||
externalDatabase:
|
||||
enabled: true
|
||||
driver:
|
||||
value: "postgresql"
|
||||
host:
|
||||
value: "langflow-ide-postgresql-service"
|
||||
port:
|
||||
value: "5432"
|
||||
database:
|
||||
value: "langflow-db"
|
||||
user:
|
||||
value: "langflow"
|
||||
password:
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: "password"
|
||||
name: "langflow-ide-postgresql-service"
|
||||
sqlite:
|
||||
enabled: false
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Scaling {#e1d95ba6551742aa86958dc03b26129e}
|
||||
|
||||
|
||||
You can scale the number of replicas for the LangFlow backend and frontend services by changing the `replicaCount` value in the `values.yaml` file.
|
||||
|
||||
|
||||
```yaml
|
||||
langflow:
|
||||
backend:
|
||||
replicaCount: 3
|
||||
frontend:
|
||||
replicaCount: 3
|
||||
|
||||
```
|
||||
|
||||
|
||||
You can scale frontend and backend services independently.
|
||||
|
||||
|
||||
To scale vertically (increase the resources for the pods), you can set the `resources` values in the `values.yaml` file.
|
||||
|
||||
|
||||
```yaml
|
||||
langflow:
|
||||
backend:
|
||||
resources:
|
||||
requests:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
frontend:
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "1000m"
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Deploy on AWS EKS, Google GKE, or Azure AKS and other examples {#a8c3d4dc4e4f42f49b21189df5e2b851}
|
||||
|
||||
|
||||
Visit the [LangFlow Helm Charts repository](https://github.com/langflow-ai/langflow-helm-charts) for more information.
|
||||
|
||||
|
||||
## LangFlow (Runtime) {#49f2813ad2d3460081ad26a286a65e73}
|
||||
|
||||
|
||||
The runtime chart is tailored for deploying applications in a production environment. It is focused on stability, performance, isolation, and security to ensure that applications run reliably and efficiently.
|
||||
|
||||
|
||||
Using a dedicated deployment for a set of flows is fundamental in production environments to have granular resource control.
|
||||
|
||||
|
||||
### Prerequisites {#3ad3a9389fff483ba8bd309189426a9d}
|
||||
|
||||
- Kubernetes server
|
||||
- kubectl
|
||||
- Helm
|
||||
|
||||
### Step 0. Prepare a Kubernetes cluster {#aaa764703ec44bd5ba64b5ef4599630b}
|
||||
|
||||
|
||||
Follow the same steps as for the LangFlow IDE.
|
||||
|
||||
|
||||
### Step 1. Install the LangFlow runtime Helm chart {#72a18aa8349c421186ba01d73a002531}
|
||||
|
||||
1. Add the repository to Helm.
|
||||
|
||||
```shell
|
||||
helm repo add langflow https://langflow-ai.github.io/langflow-helm-charts
|
||||
helm repo update
|
||||
|
||||
```
|
||||
|
||||
2. Install the LangFlow app with the default options in the `langflow` namespace.
|
||||
If you bundled the flow in a docker image, you can specify the image name in the `values.yaml` file or with the `--set` flag:
|
||||
If you want to download the flow from a remote location, you can specify the URL in the `values.yaml` file or with the `--set` flag:
|
||||
|
||||
```shell
|
||||
helm install my-langflow-app langflow/langflow-runtime -n langflow --create-namespace --set image.repository=myuser/langflow-just-chat --set image.tag=1.0.0
|
||||
|
||||
```
|
||||
|
||||
|
||||
```shell
|
||||
helm install my-langflow-app langflow/langflow-runtime -n langflow --create-namespace --set downloadFlows.flows[0].url=https://raw.githubusercontent.com/langflow-ai/langflow/dev/src/backend/base/langflow/initial_setup/starter_projects/Basic%20Prompting%20(Hello%2C%20world!).json
|
||||
|
||||
```
|
||||
|
||||
3. Check the status of the pods.
|
||||
|
||||
```text
|
||||
kubectl get pods -n langflow
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Step 2. Access the LangFlow app API {#e13326fc07734e4aa86dfb75ccfa31f8}
|
||||
|
||||
|
||||
Enable local port forwarding to access LangFlow from your local machine.
|
||||
|
||||
|
||||
```text
|
||||
kubectl port-forward -n langflow svc/langflow-my-langflow-app 7860:7860
|
||||
|
||||
```
|
||||
|
||||
|
||||
Now you can access the API at [http://localhost:7860/api/v1/flows](http://localhost:7860/api/v1/flows) and execute the flow:
|
||||
|
||||
|
||||
```shell
|
||||
id=$(curl -s http://localhost:7860/api/v1/flows | jq -r '.flows[0].id')
|
||||
curl -X POST \
|
||||
  "http://localhost:7860/api/v1/run/$id?stream=false" \
|
||||
  -H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"input_value": "Hello!",
|
||||
"output_type": "chat",
|
||||
"input_type": "chat"
|
||||
}'
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Storage {#09514d2b59064d37b685c7c0acecb861}
|
||||
|
||||
|
||||
In this case, storage is not needed as our deployment is stateless.
|
||||
|
||||
|
||||
### Log level and LangFlow configurations {#ecd97f0be96d4d1cabcc5b77a2d00980}
|
||||
|
||||
|
||||
You can set the log level and other LangFlow configurations in the `values.yaml` file.
|
||||
|
||||
|
||||
```yaml
|
||||
env:
|
||||
- name: LANGFLOW_LOG_LEVEL
|
||||
value: "INFO"
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Configure secrets and variables {#b91929e92acf47c183ea4c9ba9d19514}
|
||||
|
||||
|
||||
To inject secrets and LangFlow global variables, you can use the `secrets` and `env` sections in the `values.yaml` file.
|
||||
|
||||
|
||||
Let's say your flow uses a global variable which is a secret; when you export the flow as JSON, it's recommended to not include it.
|
||||
When importing the flow in the LangFlow runtime, you can set the global variable using the `env` section in the `values.yaml` file.
|
||||
Assuming you have a global variable called `openai_key_var`, you can read it directly from a secret:
|
||||
|
||||
|
||||
```yaml
|
||||
env:
|
||||
- name: openai_key_var
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: openai-key
|
||||
key: openai-key
|
||||
|
||||
```
|
||||
|
||||
|
||||
or directly from the values file (not recommended for secret values!):
|
||||
|
||||
|
||||
```yaml
|
||||
env:
|
||||
- name: openai_key_var
|
||||
value: "sk-...."
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Scaling {#359b9ea5302147ebbed3ab8aa49dae8d}
|
||||
|
||||
|
||||
You can scale the number of replicas for the LangFlow app by changing the `replicaCount` value in the `values.yaml` file.
|
||||
|
||||
|
||||
```yaml
|
||||
replicaCount: 3
|
||||
|
||||
```
|
||||
|
||||
|
||||
To scale vertically (increase the resources for the pods), you can set the `resources` values in the `values.yaml` file.
|
||||
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Other examples {#8522b4276b51448e9f8f0c6efc731a7c}
|
||||
|
||||
|
||||
Visit the LangFlow Helm Charts repository for more examples and configurations.
|
||||
|
||||
|
||||
Use the default values file as reference for all the options available.
|
||||
|
||||
|
||||
Visit the examples directory to learn more about different deployment options.
|
||||
|
||||
27
docs/docs/Deployment/deployment-railway.md
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
title: Railway
|
||||
sidebar_position: 5
|
||||
slug: /deployment-railway
|
||||
---
|
||||
|
||||
|
||||
|
||||
# Deploy on Railway {#a9a1ce4d39e74cc29aef4d30c6172d10}
|
||||
|
||||
|
||||
Railway is a cloud infrastructure platform that enables developers to deploy and manage applications effortlessly. It provides an intuitive interface, seamless integrations, and powerful features like auto-deploy from GitHub, managed databases, and automatic scaling.
|
||||
|
||||
|
||||
Deploying Langflow to Railway involves a few simple steps:
|
||||
|
||||
1. **Click the Button Below**: Start by clicking the deployment button provided below. This will redirect you to the Railway platform.
|
||||
|
||||
[https://railway.app/template/JMXEWp?referralCode=MnPSdg](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
|
||||
|
||||
2. **Deploy**: Proceed to deploy your Langflow instance. Click Deploy Now to deploy the instance. Railway will handle the rest, including setting up the infrastructure, deploying the Langflow instance, and starting the application.
|
||||
|
||||

|
||||
|
||||
|
||||
By following these steps, your Langflow instance will be successfully deployed on Railway.
|
||||
|
||||
31
docs/docs/Deployment/deployment-render.md
Normal file
@@ -0,0 +1,31 @@
|
||||
---
|
||||
title: Render
|
||||
sidebar_position: 4
|
||||
slug: /deployment-render
|
||||
---
|
||||
|
||||
|
||||
|
||||
# Deploy on Render {#20a959b7047e44e490cc129fd21895c0}
|
||||
|
||||
|
||||
[Render.com](http://render.com/) is a unified cloud platform designed to make deploying web applications, APIs, and static sites easy. It provides a streamlined experience with powerful features like automatic SSL, managed databases, and auto-deploy from Git, making it a popular choice for developers looking to simplify their deployment workflows.
|
||||
|
||||
|
||||
Deploying Langflow to Render is a straightforward process that can be completed in just a few steps:
|
||||
|
||||
1. **Click the Button Below**: Start by clicking the deployment button provided below. This will redirect you to the Render platform.
|
||||
|
||||
[https://render.com/deploy?repo=https://github.com/langflow-ai/langflow/tree/dev](https://render.com/deploy?repo=https%3A%2F%2Fgithub.com%2Flangflow-ai%2Flangflow%2Ftree%2Fdev)
|
||||
|
||||
2. **Select the Default Configuration**: Once on the Render platform, you will be prompted to provide a blueprint name and to select the default configuration for Langflow. This configuration includes all the necessary settings and resources to run Langflow efficiently. You can change the branch of the repo to “main” or “dev” based on your preference. Click “Create New Resources” to proceed.
|
||||
|
||||

|
||||
|
||||
3. **Deploy**: After selecting the configuration, proceed to deploy your Langflow instance. You can keep the default Starter instance, or change it to another instance based on your specific needs. Click Apply to deploy the instance. Render will handle the rest, including setting up the database, deploying the Langflow instance, and starting the application.
|
||||
|
||||

|
||||
|
||||
|
||||
By following these steps, your Langflow instance will be successfully deployed on Render. Remember to review the pricing details on the Render platform to understand any costs involved.
|
||||
|
||||
|
Before Width: | Height: | Size: 54 MiB After Width: | Height: | Size: 54 MiB |
|
Before Width: | Height: | Size: 486 KiB After Width: | Height: | Size: 486 KiB |
|
Before Width: | Height: | Size: 52 KiB After Width: | Height: | Size: 52 KiB |
BIN
docs/docs/Getting-Started/602374500.png
Normal file
|
After Width: | Height: | Size: 372 KiB |
1
docs/docs/Getting-Started/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":1, "label":"Getting Started"}
|
||||
@@ -0,0 +1,53 @@
|
||||
---
|
||||
title: ❗️ Common Installation Issues
|
||||
sidebar_position: 3
|
||||
slug: /getting-started-common-installation-issues
|
||||
---
|
||||
|
||||
|
||||
|
||||
This is a list of possible issues that you may encounter when installing Langflow and how to solve them.
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
```bash
|
||||
> No module named 'langflow.__main__'
|
||||
```
|
||||
|
||||
1. Run `python -m langflow run` instead of `langflow run`.
|
||||
2. If that doesn't work, reinstall Langflow with `python -m pip install langflow --pre -U`.
|
||||
3. If that doesn't work, reinstall Langflow and its dependencies with `python -m pip install langflow --pre -U --force-reinstall`.
|
||||
|
||||
When you try to run Langflow using the command `langflow run`, you may encounter the following error:
|
||||
|
||||
|
||||
```bash
|
||||
> langflow run
Traceback (most recent call last):
  File ".../langflow", line 5, in <module>
    from langflow.__main__ import main
ModuleNotFoundError: No module named 'langflow.__main__'
|
||||
```
|
||||
|
||||
|
||||
There are two possible reasons for this error:
|
||||
|
||||
1. You've installed Langflow using `pip install langflow` but you already had a previous version of Langflow installed in your system. In this case, you might be running the wrong executable. To solve this issue, run the correct executable by running `python -m langflow run` instead of `langflow run`. If that doesn't work, try uninstalling and reinstalling Langflow with `python -m pip install langflow --pre -U`.
|
||||
2. Some version conflicts might have occurred during the installation process. Run `python -m pip install langflow --pre -U --force-reinstall` to reinstall Langflow and its dependencies.
|
||||
|
||||
```bash
|
||||
> Something went wrong running migrations. Please, run 'langflow migration --fix'
|
||||
```
|
||||
|
||||
|
||||
Clear the cache by deleting the contents of the cache folder.
|
||||
|
||||
|
||||
This folder can be found at:
|
||||
|
||||
- **Linux or WSL2 on Windows**: `/home/<username>/.cache/langflow/`
|
||||
- **MacOS**: `/Users/<username>/Library/Caches/langflow/`
|
||||
|
||||
This error can occur during Langflow upgrades when the new version can't override `langflow-pre.db` in `.cache/langflow/`. Clearing the cache removes this file but will also erase your settings.
|
||||
|
||||
|
||||
If you wish to retain your files, back them up before clearing the folder.
|
||||
|
||||
85
docs/docs/Getting-Started/getting-started-installation.md
Normal file
@@ -0,0 +1,85 @@
|
||||
---
|
||||
title: 📦 Installation
|
||||
sidebar_position: 1
|
||||
slug: /getting-started-installation
|
||||
---
|
||||
|
||||
|
||||
|
||||
## **DataStax Langflow** {#a3b4bd8dec5a49ebbfca4828492133e9}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
The easiest way to get started with Langflow is through the DataStax Cloud Service!
|
||||
|
||||
|
||||
|
||||
**DataStax Langflow** is a hosted version of Langflow integrated with [AstraDB](https://www.datastax.com/products/datastax-astra). Be up and running in minutes with no installation or setup required. [Sign up for free](https://langflow.datastax.com/).
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
# **Install Langflow Locally** {#ef364ee864c545649d248113ad7d3038}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
:::caution
|
||||
|
||||
Langflow **requires** Python version 3.10 or greater and [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/) to be installed on your system.
|
||||
|
||||
:::
|
||||
|
||||
|
||||
|
||||
|
||||
Install Langflow with pip:
|
||||
|
||||
|
||||
```bash
|
||||
python -m pip install langflow -U
|
||||
```
|
||||
|
||||
|
||||
Install Langflow with pipx:
|
||||
|
||||
|
||||
```bash
|
||||
pipx install langflow --python python3.10 --fetch-missing-python
|
||||
```
|
||||
|
||||
|
||||
Pipx can fetch the missing Python version for you with `--fetch-missing-python`, but you can also install the Python version manually. Use `--force-reinstall` to ensure you have the latest version of Langflow and its dependencies.
|
||||
|
||||
|
||||
## Having a problem? {#86a16dad1d6e481cafb90efea2b9ff93}
|
||||
|
||||
|
||||
If you encounter a problem, see [Common Installation Issues](/getting-started-common-installation-issues).
|
||||
|
||||
|
||||
To get help in the Langflow CLI:
|
||||
|
||||
|
||||
```bash
|
||||
python -m langflow --help
|
||||
```
|
||||
|
||||
|
||||
## ⛓️ Run Langflow {#d318c4d486b74f5383c45b4f6859dcaa}
|
||||
|
||||
1. To run Langflow, enter the following command.
|
||||
|
||||
```bash
|
||||
python -m langflow run
|
||||
```
|
||||
|
||||
1. Confirm that a local Langflow instance starts by visiting `http://127.0.0.1:7860` in a Chromium-based browser.
|
||||
|
||||

|
||||
|
||||
1. Continue on to the [Quickstart](/getting-started-quickstart).
|
||||
71
docs/docs/Getting-Started/getting-started-quickstart.md
Normal file
@@ -0,0 +1,71 @@
|
||||
---
|
||||
title: ⚡️ Quickstart
|
||||
sidebar_position: 2
|
||||
slug: /getting-started-quickstart
|
||||
---
|
||||
|
||||
|
||||
|
||||
## Prerequisites {#b5f154a3a1d242c7bdf57acf0a552732}
|
||||
|
||||
|
||||
---
|
||||
|
||||
- [Python >=3.10](https://www.python.org/downloads/release/python-3100/) and [pip](https://pypi.org/project/pip/) or [pipx](https://pipx.pypa.io/stable/installation/)
|
||||
- [OpenAI API key](https://platform.openai.com/)
|
||||
- [Langflow installed and running](/getting-started-installation)
|
||||
|
||||
## Hello World - Basic Prompting {#67e7cd59d0fa43e3926bdc75134f7472}
|
||||
|
||||
|
||||
Let's start with a Prompt component to instruct an OpenAI Model.
|
||||
|
||||
|
||||
Prompts serve as the inputs to a large language model (LLM), acting as the interface between human instructions and computational tasks. By submitting natural language requests in a prompt to an LLM, you can obtain answers, generate text, and solve problems.
|
||||
|
||||
1. From the Langflow dashboard, click **New Project**.
|
||||
2. Select **Basic Prompting**.
|
||||
|
||||

|
||||
|
||||
|
||||
This flow allows you to chat with the **OpenAI** model by using a **Prompt** to send instructions.
|
||||
|
||||
|
||||
Examine the **Prompt** component. The **Template** field instructs the LLM to `Answer the user as if you were a pirate.` This should be interesting...
|
||||
|
||||
|
||||
To use the **OpenAI** component, you have two options for providing your OpenAI API Key: directly passing it to the component or creating an environment variable. For better security and manageability, creating an environment variable is recommended. Here's how to set it up:
|
||||
|
||||
|
||||
In the **OpenAI API Key** field, click the **Globe** button to access environment variables, and then click **Add New Variable**.
|
||||
|
||||
1. In the **Variable Name** field, enter `openai_api_key`.
|
||||
2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
|
||||
3. Click **Save Variable**.
|
||||
|
||||
By creating an environment variable, you keep your API key secure and make it easier to manage across different components or projects.
|
||||
|
||||
|
||||
## Run the basic prompting flow {#27ac88f4721b42c9a9587326905b8df4}
|
||||
|
||||
1. Click the **Playground** button. This is where you can interact with your bot.
|
||||
2. Type any message and press Enter. And... Ahoy! 🏴☠️ The bot responds in a piratical manner!
|
||||
|
||||
## Modify the prompt for a different result {#5208b946024846169fe59ee206021a4f}
|
||||
|
||||
1. To modify your prompt results, in the **Prompt** template, click the **Template** field. The **Edit Prompt** window opens.
|
||||
2. Change `Answer the user as if you were a pirate` to a different character, perhaps `Answer the user as if you were Harold Abelson.`
|
||||
3. Run the basic prompting flow again. The response will be markedly different.
|
||||
|
||||
## Next steps {#63b6db6cb571489c86b3ae89051f1a4f}
|
||||
|
||||
|
||||
Well done! You've built your first prompt in Langflow. 🎉
|
||||
|
||||
|
||||
By dragging Langflow components to your workspace, you can create all sorts of interesting behaviors. Here are a couple of examples:
|
||||
|
||||
- [Memory Chatbot](https://docs.langflow.org/starter-projects/memory-chatbot)
|
||||
- [Blog Writer](https://docs.langflow.org/starter-projects/blog-writer)
|
||||
- [Document QA](https://docs.langflow.org/starter-projects/document-qa)
|
||||
31
docs/docs/Getting-Started/👋 Welcome-to-Langflow.md
Normal file
@@ -0,0 +1,31 @@
|
||||
---
|
||||
title: 👋 Welcome to Langflow
|
||||
sidebar_position: 0
|
||||
slug: /
|
||||
---
|
||||
|
||||
|
||||
|
||||
## Introduction {#e12578e9f465459592d89dbe47a54460}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Langflow is a new, visual framework for building multi-agent and RAG applications. It is open-source, Python-powered, fully customizable, LLM and vector store agnostic.
|
||||
|
||||
|
||||
Its intuitive interface allows for easy manipulation of AI building blocks, enabling developers to quickly prototype and turn their ideas into powerful, real-world solutions.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
## 🚀 First steps {#c1ccb5e315b040edaa8d9a67f4960d81}
|
||||
|
||||
- [Install Langflow](/getting-started-installation) - Install and start a local Langflow server.
|
||||
- [Quickstart](/getting-started-quickstart) - Create a flow and run it.
|
||||
- [Langflow Workspace](/workspace) - Learn more about the Langflow Workspace.
|
||||
|
||||
Learn more about the exciting changes in Langflow 1.0 in [A new chapter for Langflow](/whats-new-a-new-chapter-langflow).
|
||||
|
||||
BIN
docs/docs/Guides/1313358839.png
Normal file
|
After Width: | Height: | Size: 391 KiB |
BIN
docs/docs/Guides/1988919422.png
Normal file
|
After Width: | Height: | Size: 244 KiB |
BIN
docs/docs/Guides/207457678.png
Normal file
|
After Width: | Height: | Size: 230 KiB |
BIN
docs/docs/Guides/403427222.png
Normal file
|
After Width: | Height: | Size: 333 KiB |
BIN
docs/docs/Guides/948333764.png
Normal file
|
After Width: | Height: | Size: 268 KiB |
1
docs/docs/Guides/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":3, "label":"Guides"}
|
||||
61
docs/docs/Guides/guides-chat-memory.md
Normal file
@@ -0,0 +1,61 @@
|
||||
---
|
||||
title: Chat Memory
|
||||
sidebar_position: 1
|
||||
slug: /guides-chat-memory
|
||||
---
|
||||
|
||||
|
||||
|
||||
Langflow allows every chat message to be stored, and a single flow can have multiple memory sessions. This enables you to create separate _memories_ for agents to store and recall information as needed.
|
||||
|
||||
|
||||
In any project, as long as there are [**Chat**](/components-io) components being used, memories are always being stored by default. These are messages from a user to the AI or vice-versa.
|
||||
|
||||
|
||||
To see and access this history of messages, Langflow features a component called **Chat Memory**. It retrieves previous messages and outputs them in structured format or parsed.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
To learn the basics about memory in Langflow, check out the [Memory Chatbot](/starter-projects-memory-chatbot) starter example.
|
||||
|
||||
|
||||
Memories can be visualized and managed directly from the **Playground**. You can edit and remove previous messages to inspect and validate the AI’s response behavior. You can remove or edit previous messages to get your models acting just right.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
Modifying these memories will influence the behavior of the chatbot responses, as long as an agent uses them. Here you have the ability to remove or edit previous messages, allowing you to manipulate and explore how these changes affect model responses.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
## Session ID {#4ee86e27d1004e8288a72c633c323703}
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
Chat conversations store messages categorized by a `Session ID`. A single flow can host multiple session IDs, and different flows can also share the same one.
|
||||
|
||||
|
||||
The **Chat Memory** component also retrieves message histories by `Session ID` which users can change in the advanced settings.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
By default, if the `Session ID` value is empty, it is set to match the same value as the `Flow ID`.
|
||||
|
||||
|
||||
You can also display all messages stored across every flow and session by going to **Settings** > **Messages**.
|
||||
|
||||
|
||||

|
||||
|
||||
89
docs/docs/Guides/guides-data-message.md
Normal file
@@ -0,0 +1,89 @@
|
||||
---
|
||||
title: Data & Message
|
||||
sidebar_position: 2
|
||||
slug: /guides-data-message
|
||||
---
|
||||
|
||||
|
||||
|
||||
In Langflow, the `Data` and `Message` objects serve as structured, functional representations of data that enhance the capabilities and reliability of the platform.
|
||||
|
||||
|
||||
### The Data Object {#e0d56e463d2f483bb1b5df09d88bf309}
|
||||
|
||||
|
||||
The `Data` object is a Pydantic model that serves as a container for storing and manipulating data. It carries `data`—a dictionary that can be accessed as attributes—and uses `text_key` to specify which key in the dictionary should be considered the primary text content.
|
||||
|
||||
|
||||
- **Main Attributes:**
|
||||
- `text_key`: Specifies the key to retrieve the primary text data.
|
||||
- `data`: A dictionary to store additional data.
|
||||
- `default_value`: default value when the `text_key` is not present in the `data` dictionary.
|
||||
|
||||
**Creating a Data Object:**
|
||||
|
||||
|
||||
You can create a `Data` object by directly assigning key-value pairs to it. For example:
|
||||
|
||||
|
||||
```python
|
||||
from langflow.schema import Data
|
||||
|
||||
# Creating a Data object with specified key-value pairs
|
||||
data = Data(text="my_string", bar=3, foo="another_string")
|
||||
|
||||
# Outputs:
|
||||
print(data.text) # Outputs: "my_string"
|
||||
print(data.bar) # Outputs: 3
|
||||
print(data.foo) # Outputs: "another_string"
|
||||
```
|
||||
|
||||
|
||||
The `text_key` specifies which key in the `data` dictionary should be considered the primary text content. The `default_value` provides a fallback if the `text_key` is not present.
|
||||
|
||||
|
||||
```python
|
||||
# Creating a Data object with a specific text_key and default_value
|
||||
data = Data(data={"title": "Hello, World!"}, text_key="content", default_value="No content available")
|
||||
|
||||
# Accessing the primary text using text_key and default_value
|
||||
print(data.get_text()) # Outputs: "No content available" because "content" key is not in the data dictionary
|
||||
|
||||
# Accessing data keys by calling the attribute directly
|
||||
print(data.title) # Outputs: "Hello, World!" because "title" key is in the data dictionary
|
||||
```
|
||||
|
||||
|
||||
The `Data` object is also convenient for visualization of outputs, since the output preview has visual elements to inspect data as a table and its cells as pop ups for basic types. The idea is to create a unified way to work and visualize complex information in Langflow.
|
||||
|
||||
|
||||
To receive `Data` objects in a component input, you can use the `DataInput` input type.
|
||||
|
||||
|
||||
### The Message Object {#f4f17cad02a545068f407d515cbc2902}
|
||||
|
||||
|
||||
The `Message` object extends the functionality of `Data` and includes additional attributes and methods for chat interactions.
|
||||
|
||||
- **Main Attributes:**
|
||||
- `text_key`: Key to retrieve the primary text data.
|
||||
- `text`: The main text content of the message.
|
||||
- `sender`: Identifier for the sender (e.g., "User" or "AI").
|
||||
- `sender_name`: Name of the sender.
|
||||
- `files`: List of files associated with the message.
|
||||
- `session_id`: Identifier for the chat session.
|
||||
- `timestamp`: Timestamp when the message was created.
|
||||
- `flow_id`: Identifier for the flow.
|
||||
|
||||
The `Message` object can be used to send, store and manipulate chat messages within Langflow. You can create a `Message` object by directly assigning key-value pairs to it. For example:
|
||||
|
||||
|
||||
```python
|
||||
from langflow.schema.message import Message
|
||||
|
||||
message = Message(text="Hello, AI!", sender="User", sender_name="John Doe")
|
||||
```
|
||||
|
||||
|
||||
To receive `Message` objects in a component input, you can use the `MessageInput` input type or `MessageTextInput` when the goal is to extract just the `text` field of the `Message` object.
|
||||
|
||||
19
docs/docs/Guides/guides-new-to-llms.md
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
title: 📚 New to LLMs?
|
||||
sidebar_position: 0
|
||||
slug: /guides-new-to-llms
|
||||
---
|
||||
|
||||
|
||||
|
||||
Large Language Models, or LLMs, are part of an exciting new world in computing.
|
||||
|
||||
|
||||
We made Langflow for anyone to create with LLMs, and hope you'll feel comfortable installing Langflow and [getting started](/getting-started-quickstart).
|
||||
|
||||
|
||||
If you want to learn the basics of LLMs, prompt engineering, and AI models, Langflow recommends [promptingguide.ai](https://promptingguide.ai/), an open-source repository of prompt engineering content maintained by AI experts. PromptingGuide offers content for [beginners](https://www.promptingguide.ai/introduction/basics) and [experts](https://www.promptingguide.ai/techniques/cot), as well as the latest [research papers](https://www.promptingguide.ai/papers) and [test results](https://www.promptingguide.ai/research) fueling AI's progress.
|
||||
|
||||
|
||||
For in depth readings, we recommend [Awesome LLM Books](https://github.com/Hannibal046/Awesome-LLM?tab=readme-ov-file#llm-books), a curated list of resources for learning about LLMs and their applications.
|
||||
|
||||
|
Before Width: | Height: | Size: 620 KiB After Width: | Height: | Size: 620 KiB |
|
Before Width: | Height: | Size: 298 KiB After Width: | Height: | Size: 298 KiB |
1
docs/docs/Integrations/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":9, "label":"Integrations"}
|
||||
32
docs/docs/Integrations/integrations-langsmith.md
Normal file
@@ -0,0 +1,32 @@
|
||||
---
|
||||
title: LangSmith
|
||||
sidebar_position: 0
|
||||
slug: /integrations-langsmith
|
||||
---
|
||||
|
||||
|
||||
|
||||
LangSmith is a full-lifecycle DevOps service from LangChain that provides monitoring and observability. To integrate with Langflow, just add your LangChain API key as a Langflow environment variable and you are good to go!
|
||||
|
||||
|
||||
## Step-by-step Configuration {#b912579a43984f9a92921232b67c885d}
|
||||
|
||||
1. Obtain your LangChain API key from [https://smith.langchain.com](https://smith.langchain.com/)
|
||||
2. Add the following keys to your Langflow `.env` file:
|
||||
|
||||
```
LANGCHAIN_API_KEY="your-api-key"
LANGCHAIN_PROJECT="your-project-name"
```
|
||||
|
||||
|
||||
or export the environment variables in your terminal:
|
||||
|
||||
|
||||
```
export LANGCHAIN_API_KEY="your-api-key"
export LANGCHAIN_PROJECT="your-project-name"
```
|
||||
|
||||
3. Restart Langflow using `langflow run --env-file .env`
|
||||
4. Run any project and check the LangSmith dashboard for monitoring and observability.
|
||||
|
||||

|
||||
|
||||
|
||||

|
||||
|
||||
BIN
docs/docs/Settings/418277339.png
Normal file
|
After Width: | Height: | Size: 172 KiB |
1
docs/docs/Settings/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":7, "label":"Settings"}
|
||||
@@ -1,17 +1,20 @@
|
||||
import ThemedImage from "@theme/ThemedImage";
|
||||
import useBaseUrl from "@docusaurus/useBaseUrl";
|
||||
import ZoomableImage from "/src/theme/ZoomableImage.js";
|
||||
---
|
||||
title: Global Variables
|
||||
sidebar_position: 0
|
||||
slug: /settings-global-variables
|
||||
---
|
||||
|
||||
import ReactPlayer from "react-player";
|
||||
import Admonition from "@theme/Admonition";
|
||||
|
||||
# Global Variables
|
||||
>
|
||||
> ⚠️ WARNING
|
||||
> This page may contain outdated information. It will be updated as soon as possible.
|
||||
>
|
||||
|
||||
<Admonition type="warning" title="warning">
|
||||
This page may contain outdated information. It will be updated as soon as possible.
|
||||
</Admonition>
|
||||
|
||||
Global Variables are a useful feature of Langflow, allowing you to define reusable variables accessed from any Text field in your project.
|
||||
|
||||
|
||||
**TL;DR**
|
||||
|
||||
- Global Variables are reusable variables accessible from any Text field in your project.
|
||||
@@ -21,64 +24,65 @@ Global Variables are a useful feature of Langflow, allowing you to define reusab
|
||||
- All Credential Global Variables are encrypted and accessible only by you.
|
||||
- Set _`LANGFLOW_STORE_ENVIRONMENT_VARIABLES`_ to _`true`_ in your `.env` file to add all variables in _`LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT`_ to your user's Global Variables.
|
||||
|
||||
## Create and Add a Global Variable
|
||||
## Create and Add a Global Variable {#3543d5ef00eb453aa459b97ba85501e5}
|
||||
|
||||
|
||||
To create and add a global variable, click the 🌐 button in a Text field, and then click **+ Add New Variable**.
|
||||
|
||||
|
||||
Text fields are where you write text without opening a Text area, and are identified with the 🌐 icon.
|
||||
|
||||
|
||||
For example, to create an environment variable for the **OpenAI** component:
|
||||
|
||||
1. In the **OpenAI API Key** text field, click the 🌐 button, then **Add New Variable**.
|
||||
2. Enter `openai_api_key` in the **Variable Name** field.
|
||||
3. Paste your OpenAI API Key (`sk-...`) in the **Value** field.
|
||||
4. Select **Credential** for the **Type**.
|
||||
5. Choose **OpenAI API Key** in the **Apply to Fields** field to apply this variable to all fields named **OpenAI API Key**.
|
||||
6. Click **Save Variable**.
|
||||
1. In the **OpenAI API Key** text field, click the 🌐 button, then **Add New Variable**.
|
||||
2. Enter `openai_api_key` in the **Variable Name** field.
|
||||
3. Paste your OpenAI API Key (`sk-...`) in the **Value** field.
|
||||
4. Select **Credential** for the **Type**.
|
||||
5. Choose **OpenAI API Key** in the **Apply to Fields** field to apply this variable to all fields named **OpenAI API Key**.
|
||||
6. Click **Save Variable**.
|
||||
|
||||
You now have an `openai_api_key` global environment variable for your Langflow project.
|
||||
Subsequently, clicking the 🌐 button in a Text field will display the new variable in the dropdown.
|
||||
|
||||
<Admonition type="tip">
|
||||
You can also create global variables in **Settings** > **Variables and
|
||||
Secrets**.
|
||||
</Admonition>
|
||||
|
||||
<ZoomableImage
|
||||
alt="Docusaurus themed image"
|
||||
sources={{
|
||||
light: "img/global-env.png",
|
||||
dark: "img/global-env.png",
|
||||
}}
|
||||
style={{ width: "40%", margin: "20px auto" }}
|
||||
/>
|
||||
> 💡 TIP
|
||||
> You can also create global variables in Settings > Variables and Secrets.
|
||||
>
|
||||
|
||||
To view and manage your project's global environment variables, visit **Settings** > **Variables and Secrets**.
|
||||
|
||||
For more on variables in HuggingFace Spaces, see [Managing Secrets](https://huggingface.co/docs/hub/spaces-overview#managing-secrets).
|
||||

|
||||
|
||||
{/* All variables are encrypted */}
|
||||
|
||||
<Admonition type="warning">
|
||||
All Credential Global Variables are encrypted and accessible only by you.
|
||||
</Admonition>
|
||||
To view and manage your project's global environment variables, visit **Settings** > **Variables and Secrets**.
|
||||
|
||||
|
||||
> ⚠️ WARNING
|
||||
> All Credential Global Variables are encrypted and accessible only by you.
|
||||
>
|
||||
|
||||
|
||||
## Configure Environment Variables in your .env file {#76844a93dbbc4d1ba551ea1a4a89ccdd}
|
||||
|
||||
## Configure Environment Variables in your .env file
|
||||
|
||||
Setting `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to `true` in your `.env` file (default) adds all variables in `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` to your user's Global Variables.
|
||||
|
||||
|
||||
These variables are accessible like any other Global Variable.
|
||||
|
||||
<Admonition type="tip">
|
||||
To prevent this behavior, set `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to
|
||||
`false` in your `.env` file.
|
||||
</Admonition>
|
||||
|
||||
> 💡 TIP
|
||||
> To prevent this behavior, set `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to `false` in your `.env` file.
|
||||
>
|
||||
|
||||
|
||||
You can specify variables to get from the environment by listing them in `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT`.
|
||||
|
||||
|
||||
Specify variables as a comma-separated list (e.g., _`"VARIABLE1, VARIABLE2"`_) or a JSON-encoded string (e.g., _`'["VARIABLE1", "VARIABLE2"]'`_).
|
||||
|
||||
The default list of variables includes:
|
||||
|
||||
The default list of variables includes the ones below and more:
|
||||
|
||||
- ANTHROPIC_API_KEY
|
||||
- ASTRA_DB_API_ENDPOINT
|
||||
@@ -102,8 +106,5 @@ The default list of variables includes:
|
||||
- VECTARA_CORPUS_ID
|
||||
- VECTARA_API_KEY
|
||||
|
||||
<div
|
||||
style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }}
|
||||
>
|
||||
<ReactPlayer playing controls url="/videos/langflow_global_variables.mp4" />
|
||||
</div>
|
||||
<ReactPlayer controls url="https://prod-files-secure.s3.us-west-2.amazonaws.com/09f11537-5a5b-4f56-9e8d-de8ebcfae549/7030d3ff-3ecd-44db-8640-9c2295b4e3bc/langflow_global_variables.mp4?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIAT73L2G45HZZMZUHI%2F20240712%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Date=20240712T201930Z&X-Amz-Expires=3600&X-Amz-Signature=5f73d1f28bf3681200b6871209bbb7d5ad8677f2a136b139c5ddc22667af6959&X-Amz-SignedHeaders=host&x-id=GetObject" />
|
||||
|
||||
82
docs/docs/Settings/settings-project-general-settings.md
Normal file
@@ -0,0 +1,82 @@
|
||||
---
|
||||
title: Project & General Settings
|
||||
sidebar_position: 1
|
||||
slug: /settings-project-general-settings
|
||||
---
|
||||
|
||||
|
||||
|
||||
> ⚠️ WARNING
|
||||
> This page may contain outdated information. It will be updated as soon as possible.
|
||||
|
||||
|
||||
Change the **Project Settings** or **General Settings** for Langflow.
|
||||
|
||||
|
||||
## Project Settings {#71e61e6544c94f808cd74b8cc012363d}
|
||||
|
||||
|
||||
Click **Project Name** > **Settings** to view your **Project Settings**.
|
||||
|
||||
- **Name** - the name of your project.
|
||||
- **Description** - the description for your project.
|
||||
Visible on the Langflow Store.
|
||||
- **Endpoint name** - the custom endpoint name for your project's API endpoint.
|
||||
To use the default value, leave this field blank.
|
||||
|
||||
## General Settings {#1a0c451fd5e84feeb1d18c2886d642eb}
|
||||
|
||||
|
||||
Select your **Profile Picture** > **Settings** to view your **General Settings**.
|
||||
|
||||
|
||||
### Profile Picture {#8abfa80ed6c448b6977467679d43c275}
|
||||
|
||||
|
||||
Select a profile picture.
|
||||
|
||||
|
||||
### Store API Key {#6a12756beb0b42fd84bdf5ab5f10fffb}
|
||||
|
||||
|
||||
Add your **Langflow Store** API key. To get a Store key, go to the [Langflow store](https://www.langflow.store/).
|
||||
|
||||
|
||||
### Global Variables {#12aa7f28060447babc987bdf57fc065e}
|
||||
|
||||
|
||||
Select **Add New** to add a key to Langflow.
|
||||
|
||||
|
||||
Select the **trash icon** to delete a key.
|
||||
|
||||
|
||||
For more information, see Global Variables.
|
||||
|
||||
|
||||
### Langflow API {#0a08ffc3dd9042da9f9d2d49c9df0b6a}
|
||||
|
||||
|
||||
Create a Langflow API key.
|
||||
|
||||
|
||||
Click **Add New** > **Create Secret Key** and copy the key somewhere safe and accessible.
|
||||
|
||||
|
||||
For more information, see Langflow API.
|
||||
|
||||
|
||||
### Shortcuts {#6c9d705c9e7f466db496dbc6571c81d2}
|
||||
|
||||
|
||||
A list of keyboard shortcuts for Langflow.
|
||||
|
||||
|
||||
### Messages {#898425cf88b74c048c23e8e1e1d7c2bd}
|
||||
|
||||
|
||||
Inspect, edit, and remove messages in your flow for testing and debugging purposes.
|
||||
|
||||
|
||||
For more information, see the Playground.
|
||||
|
||||
BIN
docs/docs/Starter-Projects/1073956357.png
Normal file
|
After Width: | Height: | Size: 156 KiB |
BIN
docs/docs/Starter-Projects/1079168789.png
Normal file
|
After Width: | Height: | Size: 433 KiB |
BIN
docs/docs/Starter-Projects/1140665127.png
Normal file
|
After Width: | Height: | Size: 299 KiB |
BIN
docs/docs/Starter-Projects/1390293355.png
Normal file
|
After Width: | Height: | Size: 118 KiB |
BIN
docs/docs/Starter-Projects/1511598495.png
Normal file
|
After Width: | Height: | Size: 281 KiB |
BIN
docs/docs/Starter-Projects/1835734464.png
Normal file
|
After Width: | Height: | Size: 315 KiB |
BIN
docs/docs/Starter-Projects/1946624394.png
Normal file
|
After Width: | Height: | Size: 424 KiB |
BIN
docs/docs/Starter-Projects/25156979.png
Normal file
|
After Width: | Height: | Size: 84 KiB |
BIN
docs/docs/Starter-Projects/257920618.png
Normal file
|
After Width: | Height: | Size: 314 KiB |
BIN
docs/docs/Starter-Projects/282456806.png
Normal file
|
After Width: | Height: | Size: 270 KiB |
BIN
docs/docs/Starter-Projects/447530731.png
Normal file
|
After Width: | Height: | Size: 47 KiB |
BIN
docs/docs/Starter-Projects/450254819.png
Normal file
|
After Width: | Height: | Size: 330 KiB |
|
Before Width: | Height: | Size: 258 KiB After Width: | Height: | Size: 258 KiB |
BIN
docs/docs/Starter-Projects/648489928.png
Normal file
|
After Width: | Height: | Size: 56 KiB |
BIN
docs/docs/Starter-Projects/690736575.png
Normal file
|
After Width: | Height: | Size: 383 KiB |
BIN
docs/docs/Starter-Projects/727819216.png
Normal file
|
After Width: | Height: | Size: 239 KiB |
1
docs/docs/Starter-Projects/_category_.json
Normal file
@@ -0,0 +1 @@
|
||||
{"position":2, "label":"Starter Projects"}
|
||||