Merge remote-tracking branch 'origin/main' into llama3

Javier Martinez 2024-07-29 15:47:21 +02:00
commit a19e991fa1
35 changed files with 1569 additions and 221 deletions

.github/ISSUE_TEMPLATE/bug.yml (new file, +105)

@@ -0,0 +1,105 @@
name: Bug Report
description: Report a bug or issue with the project.
title: "[BUG] "
labels: ["bug"]
body:
  - type: markdown
    attributes:
      value: |
        **Please describe the bug you encountered.**
  - type: checkboxes
    id: pre-check
    attributes:
      label: Pre-check
      description: Please confirm that you have searched for duplicate issues before creating this one.
      options:
        - label: I have searched the existing issues and none cover this bug.
          required: true
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Provide a detailed description of the bug.
      placeholder: "Detailed description of the bug"
    validations:
      required: true
  - type: textarea
    id: steps
    attributes:
      label: Steps to Reproduce
      description: Provide the steps to reproduce the bug.
      placeholder: "1. Step one\n2. Step two\n3. Step three"
    validations:
      required: true
  - type: input
    id: expected
    attributes:
      label: Expected Behavior
      description: Describe what you expected to happen.
      placeholder: "Expected behavior"
    validations:
      required: true
  - type: input
    id: actual
    attributes:
      label: Actual Behavior
      description: Describe what actually happened.
      placeholder: "Actual behavior"
    validations:
      required: true
  - type: input
    id: environment
    attributes:
      label: Environment
      description: Provide details about your environment (e.g., OS, GPU, profile, etc.).
      placeholder: "Environment details"
    validations:
      required: true
  - type: input
    id: additional
    attributes:
      label: Additional Information
      description: Provide any additional information that may be relevant (e.g., logs, screenshots).
      placeholder: "Any additional information that may be relevant"
  - type: input
    id: version
    attributes:
      label: Version
      description: Provide the version of the project where you encountered the bug.
      placeholder: "Version number"
  - type: markdown
    attributes:
      value: |
        **Please ensure the following setup checklist has been reviewed before submitting the bug report.**
  - type: checkboxes
    id: general-setup-checklist
    attributes:
      label: Setup Checklist
      description: Verify the following general aspects of your setup.
      options:
        - label: Confirm that you have followed the installation instructions in the project's documentation.
        - label: Check that you are using the latest version of the project.
        - label: Verify disk space availability for model storage and data processing.
        - label: Ensure that you have the necessary permissions to run the project.
  - type: checkboxes
    id: nvidia-setup-checklist
    attributes:
      label: NVIDIA GPU Setup Checklist
      description: Verify the following aspects of your NVIDIA GPU setup.
      options:
        - label: Check that all CUDA dependencies are installed and are compatible with your GPU (refer to [CUDA's documentation](https://docs.nvidia.com/deploy/cuda-compatibility/#frequently-asked-questions)).
        - label: Ensure an NVIDIA GPU is installed and recognized by the system (run `nvidia-smi` to verify).
        - label: Ensure proper permissions are set for accessing GPU resources.
        - label: Docker users - Verify that the NVIDIA Container Toolkit is configured correctly (e.g., run `sudo docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi`).
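For quick reference, the two GPU checks named in the checklist above can be run directly from a shell; both commands are taken verbatim from the checklist items:

```bash
# Confirm the driver sees the GPU
nvidia-smi

# Docker users: confirm the NVIDIA Container Toolkit works end to end
sudo docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi
```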

.github/ISSUE_TEMPLATE/config.yml (new file, +8)

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Documentation
    url: https://docs.privategpt.dev
    about: Please refer to our documentation for more details and guidance.
  - name: Discord
    url: https://discord.gg/bK6mRVpErU
    about: Join our Discord community to ask questions and get help.

.github/ISSUE_TEMPLATE/docs.yml (new file, +19)

@@ -0,0 +1,19 @@
name: Documentation
description: Suggest a change or addition to the documentation.
title: "[DOCS] "
labels: ["documentation"]
body:
  - type: markdown
    attributes:
      value: |
        **Please describe the documentation change or addition you would like to suggest.**
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Provide a detailed description of the documentation change.
      placeholder: "Detailed description of the documentation change"
    validations:
      required: true

.github/ISSUE_TEMPLATE/feature.yml (new file, +37)

@@ -0,0 +1,37 @@
name: Enhancement
description: Suggest an enhancement or improvement to the project.
title: "[FEATURE] "
labels: ["enhancement"]
body:
  - type: markdown
    attributes:
      value: |
        **Please describe the enhancement or improvement you would like to suggest.**
  - type: textarea
    id: feature_description
    attributes:
      label: Feature Description
      description: Provide a detailed description of the enhancement.
      placeholder: "Detailed description of the enhancement"
    validations:
      required: true
  - type: textarea
    id: reason
    attributes:
      label: Reason
      description: Explain the reason for this enhancement.
      placeholder: "Reason for the enhancement"
    validations:
      required: true
  - type: textarea
    id: value
    attributes:
      label: Value of Feature
      description: Describe the value or benefits this feature will bring.
      placeholder: "Value or benefits of the feature"
    validations:
      required: true

.github/ISSUE_TEMPLATE/question.yml (new file, +19)

@@ -0,0 +1,19 @@
name: Question
description: Ask a question about the project.
title: "[QUESTION] "
labels: ["question"]
body:
  - type: markdown
    attributes:
      value: |
        **Please describe your question in detail.**
  - type: textarea
    id: question
    attributes:
      label: Question
      description: Provide a detailed description of your question.
      placeholder: "Detailed description of the question"
    validations:
      required: true

.github/pull_request_template.md (new file, +37)

@@ -0,0 +1,37 @@
# Description
Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change.
## Type of Change
Please delete options that are not relevant.
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
## How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration.
- [ ] Added new unit/integration tests
- [ ] I stared at the code and made sure it makes sense
**Test Configuration**:
* Firmware version:
* Hardware:
* Toolchain:
* SDK:
## Checklist:
- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged and published in downstream modules
- [ ] I ran `make check; make test` to ensure mypy and tests pass


@@ -11,6 +11,10 @@ jobs:
  preview-docs:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
@@ -37,14 +41,14 @@ jobs:
          # Set the output for the step
          echo "::set-output name=preview_url::$preview_url"
      - name: Comment PR with URL using github-actions bot
-       uses: actions/github-script@v4
+       uses: actions/github-script@v7
        if: ${{ steps.generate_docs.outputs.preview_url }}
        with:
          script: |
            const preview_url = '${{ steps.generate_docs.outputs.preview_url }}';
-           const issue_number = context.issue.number;
-           github.issues.createComment({
-             ...context.repo,
-             issue_number: issue_number,
+           github.rest.issues.createComment({
+             issue_number: context.issue.number,
+             owner: context.repo.owner,
+             repo: context.repo.repo,
              body: `Published docs preview URL: ${preview_url}`
            })


@@ -8,18 +8,9 @@ message: >-
  metadata from this file.
type: software
authors:
-  - given-names: Iván
-    family-names: Martínez Toro
-    email: ivanmartit@gmail.com
-    orcid: 'https://orcid.org/0009-0004-5065-2311'
-  - family-names: Gallego Vico
-    given-names: Daniel
-    email: danielgallegovico@gmail.com
-    orcid: 'https://orcid.org/0009-0006-8582-4384'
-  - given-names: Pablo
-    family-names: Orgaz
-    email: pabloogc+gh@gmail.com
-    orcid: 'https://orcid.org/0009-0008-0080-1437'
-repository-code: 'https://github.com/imartinez/privateGPT'
+  - name: Zylon by PrivateGPT
+    address: hello@zylon.ai
+    website: 'https://www.zylon.ai/'
+repository-code: 'https://github.com/zylon-ai/private-gpt'
license: Apache-2.0
date-released: '2023-05-02'


@@ -1,6 +1,6 @@
# 🔒 PrivateGPT 📑
-[![Tests](https://github.com/imartinez/privateGPT/actions/workflows/tests.yml/badge.svg)](https://github.com/imartinez/privateGPT/actions/workflows/tests.yml?query=branch%3Amain)
+[![Tests](https://github.com/zylon-ai/private-gpt/actions/workflows/tests.yml/badge.svg)](https://github.com/zylon-ai/private-gpt/actions/workflows/tests.yml?query=branch%3Amain)
[![Website](https://img.shields.io/website?up_message=check%20it&down_message=down&url=https%3A%2F%2Fdocs.privategpt.dev%2F&label=Documentation)](https://docs.privategpt.dev/)
[![Discord](https://img.shields.io/discord/1164200432894234644?logo=discord&label=PrivateGPT)](https://discord.gg/bK6mRVpErU)
@@ -9,7 +9,7 @@
> Install & usage docs: https://docs.privategpt.dev/
>
-> Join the community: [Twitter](https://twitter.com/PrivateGPT_AI) & [Discord](https://discord.gg/bK6mRVpErU)
+> Join the community: [Twitter](https://twitter.com/ZylonPrivateGPT) & [Discord](https://discord.gg/bK6mRVpErU)
![Gradio UI](/fern/docs/assets/ui.png?raw=true)
@@ -38,9 +38,10 @@ In addition to this, a working [Gradio UI](https://www.gradio.app/)
client is provided to test the API, together with a set of useful tools such as bulk model
download script, ingestion script, documents folder watch, etc.
-> 👂 **Need help applying PrivateGPT to your specific use case?**
-> [Let us know more about it](https://forms.gle/4cSDmH13RZBHV9at7)
-> and we'll try to help! We are refining PrivateGPT through your feedback.
+> 💡 If you are looking for an **enterprise-ready, fully private AI workspace**
+> check out [Zylon's website](https://zylon.ai) or [request a demo](https://cal.com/zylon/demo?source=pgpt-readme).
+> Crafted by the team behind PrivateGPT, Zylon is a best-in-class AI collaborative
+> workspace that can be easily deployed on-premise (data center, bare metal...) or in your private cloud (AWS, GCP, Azure...).
## 🎞️ Overview
DISCLAIMER: This README is not updated as frequently as the [documentation](https://docs.privategpt.dev/).
@@ -62,7 +63,7 @@ thus a simpler and more educational implementation to understand the basic conce
to build a fully local -and therefore, private- chatGPT-like tool.
If you want to keep experimenting with it, we have saved it in the
-[primordial branch](https://github.com/imartinez/privateGPT/tree/primordial) of the project.
+[primordial branch](https://github.com/zylon-ai/private-gpt/tree/primordial) of the project.
> It is strongly recommended to do a clean clone and install of this new version of
PrivateGPT if you come from the previous, primordial version.
@@ -73,7 +74,7 @@ completions, document ingestion, RAG pipelines and other low-level building bloc
We want to make it easier for any developer to build AI applications and experiences, as well as provide
a suitable extensive architecture for the community to keep contributing.
-Stay tuned to our [releases](https://github.com/imartinez/privateGPT/releases) to check out all the new features and changes included.
+Stay tuned to our [releases](https://github.com/zylon-ai/private-gpt/releases) to check out all the new features and changes included.
## 📄 Documentation
Full documentation on installation, dependencies, configuration, running the server, deployment options,
@@ -132,19 +133,19 @@ Here are a couple of examples:
#### BibTeX
```bibtex
-@software{Martinez_Toro_PrivateGPT_2023,
-author = {Martínez Toro, Iván and Gallego Vico, Daniel and Orgaz, Pablo},
+@software{Zylon_PrivateGPT_2023,
+author = {Zylon by PrivateGPT},
license = {Apache-2.0},
month = may,
title = {{PrivateGPT}},
-url = {https://github.com/imartinez/privateGPT},
+url = {https://github.com/zylon-ai/private-gpt},
year = {2023}
}
```
#### APA
```
-Martínez Toro, I., Gallego Vico, D., & Orgaz, P. (2023). PrivateGPT [Computer software]. https://github.com/imartinez/privateGPT
+Zylon by PrivateGPT (2023). PrivateGPT [Computer software]. https://github.com/zylon-ai/private-gpt
```
## 🤗 Partners & Supporters


@@ -1,4 +1,4 @@
-# Documentation of privateGPT
+# Documentation of PrivateGPT
The documentation of this project is being rendered thanks to [fern](https://github.com/fern-api/fern).


@@ -32,7 +32,7 @@ navigation:
      contents:
        - page: Introduction
          path: ./docs/pages/overview/welcome.mdx
-  # How to install privateGPT, with FAQ and troubleshooting
+  # How to install PrivateGPT, with FAQ and troubleshooting
  - tab: installation
    layout:
      - section: Getting started
@@ -41,7 +41,9 @@ navigation:
          path: ./docs/pages/installation/concepts.mdx
        - page: Installation
          path: ./docs/pages/installation/installation.mdx
        - page: Troubleshooting
          path: ./docs/pages/installation/troubleshooting.mdx
-  # Manual of privateGPT: how to use it and configure it
+  # Manual of PrivateGPT: how to use it and configure it
  - tab: manual
    layout:
      - section: General configuration
@@ -68,8 +70,10 @@ navigation:
          path: ./docs/pages/manual/reranker.mdx
      - section: User Interface
        contents:
-         - page: User interface (Gradio) Manual
-           path: ./docs/pages/manual/ui.mdx
+         - page: Gradio Manual
+           path: ./docs/pages/ui/gradio.mdx
          - page: Alternatives
            path: ./docs/pages/ui/alternatives.mdx
  # Small code snippet or example of usage to help users
  - tab: recipes
    layout:
@@ -78,7 +82,7 @@ navigation:
      # TODO: add recipes
        - page: List of LLMs
          path: ./docs/pages/recipes/list-llm.mdx
-  # More advanced usage of privateGPT, by API
+  # More advanced usage of PrivateGPT, by API
  - tab: api-reference
    layout:
      - section: Overview
@@ -92,12 +96,11 @@ navigation:
# Definition of the navbar, will be displayed in the top right corner.
# `type:primary` is always displayed at the most right side of the navbar
navbar-links:
-  - type: secondary
-    text: GitHub
-    url: "https://github.com/imartinez/privateGPT"
  - type: secondary
    text: Contact us
    url: "mailto:hello@zylon.ai"
  - type: github
    value: "https://github.com/zylon-ai/private-gpt"
  - type: primary
    text: Join the Discord
    url: https://discord.com/invite/bK6mRVpErU


@@ -26,12 +26,12 @@ The clients are kept up to date automatically, so we encourage you to use the la
<Card
  title="Java - WIP"
  icon="fa-brands fa-java"
- href="https://github.com/imartinez/privateGPT-java"
+ href="https://github.com/zylon-ai/private-gpt-java"
/>
<Card
  title="Go - WIP"
  icon="fa-brands fa-golang"
- href="https://github.com/imartinez/privateGPT-go"
+ href="https://github.com/zylon-ai/private-gpt-go"
/>
</Cards>


@@ -8,20 +8,27 @@ It supports a variety of LLM providers, embeddings providers, and vector stores,
## Setup configurations available
You get to decide the setup for these 3 main components:
-- LLM: the large language model provider used for inference. It can be local, or remote, or even OpenAI.
-- Embeddings: the embeddings provider used to encode the input, the documents and the users' queries. Same as the LLM, it can be local, or remote, or even OpenAI.
-- Vector store: the store used to index and retrieve the documents.
+- **LLM**: the large language model provider used for inference. It can be local, or remote, or even OpenAI.
+- **Embeddings**: the embeddings provider used to encode the input, the documents and the users' queries. Same as the LLM, it can be local, or remote, or even OpenAI.
+- **Vector store**: the store used to index and retrieve the documents.
There is an extra component that can be enabled or disabled: the UI. It is a Gradio UI that allows to interact with the API in a more user-friendly way.
<Callout intent = "warning">
A working **Gradio UI client** is provided to test the API, together with a set of useful tools such as bulk
model download script, ingestion script, documents folder watch, etc. Please refer to the [UI alternatives](/manual/user-interface/alternatives) page for more UI alternatives.
</Callout>
### Setups and Dependencies
-Your setup will be the combination of the different options available. You'll find recommended setups in the [installation](/installation) section.
+Your setup will be the combination of the different options available. You'll find recommended setups in the [installation](./installation) section.
PrivateGPT uses poetry to manage its dependencies. You can install the dependencies for the different setups by running `poetry install --extras "<extra1> <extra2>..."`.
-Extras are the different options available for each component. For example, to install the dependencies for a a local setup with UI and qdrant as vector database, Ollama as LLM and HuggingFace as local embeddings, you would run
-`poetry install --extras "ui vector-stores-qdrant llms-ollama embeddings-huggingface"`.
+Extras are the different options available for each component. For example, to install the dependencies for a local setup with UI and Qdrant as the vector database, Ollama as the LLM and local embeddings, you would run:
+```bash
+poetry install --extras "ui vector-stores-qdrant llms-ollama embeddings-ollama"
+```
-Refer to the [installation](/installation) section for more details.
+Refer to the [installation](./installation) section for more details.
### Setups and Configuration
PrivateGPT uses yaml to define its configuration in files named `settings-<profile>.yaml`.
@@ -37,17 +44,6 @@ will load the configuration from `settings.yaml` and `settings-ollama.yaml`.
## About Fully Local Setups
In order to run PrivateGPT in a fully local setup, you will need to run the LLM, Embeddings and Vector Store locally.
-### Vector stores
-The vector stores supported (Qdrant, ChromaDB and Postgres) run locally by default.
-### Embeddings
-For local Embeddings there are two options:
-* (Recommended) You can use the 'ollama' option in PrivateGPT, which will connect to your local Ollama instance. Ollama simplifies a lot the installation of local LLMs.
-* You can use the 'embeddings-huggingface' option in PrivateGPT, which will use HuggingFace.
-In order for HuggingFace LLM to work (the second option), you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
-```bash
-poetry run python scripts/setup
-```
### LLM
For local LLM there are two options:
@@ -58,3 +54,14 @@ In order for LlamaCPP powered LLM to work (the second option), you need to downl
```bash
poetry run python scripts/setup
```
### Embeddings
For local Embeddings there are two options:
* (Recommended) You can use the 'ollama' option in PrivateGPT, which will connect to your local Ollama instance. Ollama greatly simplifies the installation of local LLMs.
* You can use the 'embeddings-huggingface' option in PrivateGPT, which will use HuggingFace.
In order for the HuggingFace option to work, you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
```bash
poetry run python scripts/setup
```
### Vector stores
The vector stores supported (Qdrant, Milvus, ChromaDB and Postgres) run locally by default.


@@ -1,63 +1,102 @@
-It is important that you review the Main Concepts before you start the installation process.
+It is important that you review the [Main Concepts](../concepts) section to understand the different components of PrivateGPT and how they interact with each other.
## Base requirements to run PrivateGPT
-* Clone PrivateGPT repository, and navigate to it:
+### 1. Clone the PrivateGPT Repository
+Clone the repository and navigate to it:
```bash
git clone https://github.com/zylon-ai/private-gpt
cd private-gpt
```
-* Install Python `3.11` (*if you do not have it already*). Ideally through a python version manager like `pyenv`.
-Earlier python versions are not supported.
-  * osx/linux: [pyenv](https://github.com/pyenv/pyenv)
-  * windows: [pyenv-win](https://github.com/pyenv-win/pyenv-win)
+### 2. Install Python 3.11
+If you do not have Python 3.11 installed, install it using a Python version manager like `pyenv`. Earlier Python versions are not supported.
+#### macOS/Linux
+Install and set Python 3.11 using [pyenv](https://github.com/pyenv/pyenv):
+```bash
+pyenv install 3.11
+pyenv local 3.11
+```
+#### Windows
+Install and set Python 3.11 using [pyenv-win](https://github.com/pyenv-win/pyenv-win):
```bash
pyenv install 3.11
pyenv local 3.11
```
-* Install [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) for dependency management:
+### 3. Install `Poetry`
+Install [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) for dependency management:
+Follow the instructions on the official Poetry website to install it.
-* Install `make` to be able to run the different scripts:
-  * osx: (Using homebrew): `brew install make`
-  * windows: (Using chocolatey) `choco install make`
+### 4. Optional: Install `make`
+To run various scripts, you need to install `make`. Follow the instructions for your operating system:
+#### macOS
+(Using Homebrew):
+```bash
+brew install make
+```
+#### Windows
+(Using Chocolatey):
+```bash
+choco install make
+```
-## Install and run your desired setup
-PrivateGPT allows to customize the setup -from fully local to cloud based- by deciding the modules to use.
-Here are the different options available:
-- LLM: "llama-cpp", "ollama", "sagemaker", "openai", "openailike", "azopenai"
-- Embeddings: "huggingface", "openai", "sagemaker", "azopenai"
-- Vector stores: "qdrant", "chroma", "postgres"
-- UI: whether or not to enable UI (Gradio) or just go with the API
-In order to only install the required dependencies, PrivateGPT offers different `extras` that can be combined during the installation process:
+## Install and Run Your Desired Setup
+PrivateGPT allows customization of the setup, from fully local to cloud-based, by deciding the modules to use. To install only the required dependencies, PrivateGPT offers different `extras` that can be combined during the installation process:
```bash
poetry install --extras "<extra1> <extra2>..."
```
-Where `<extra>` can be any of the following:
-- ui: adds support for UI using Gradio
-- llms-ollama: adds support for Ollama LLM, the easiest way to get a local LLM running, requires Ollama running locally
-- llms-llama-cpp: adds support for local LLM using LlamaCPP - expect a messy installation process on some platforms
-- llms-sagemaker: adds support for Amazon Sagemaker LLM, requires Sagemaker inference endpoints
-- llms-openai: adds support for OpenAI LLM, requires OpenAI API key
-- llms-openai-like: adds support for 3rd party LLM providers that are compatible with OpenAI's API
-- llms-azopenai: adds support for Azure OpenAI LLM, requires Azure OpenAI inference endpoints
-- embeddings-ollama: adds support for Ollama Embeddings, requires Ollama running locally
-- embeddings-huggingface: adds support for local Embeddings using HuggingFace
-- embeddings-sagemaker: adds support for Amazon Sagemaker Embeddings, requires Sagemaker inference endpoints
-- embeddings-openai = adds support for OpenAI Embeddings, requires OpenAI API key
-- embeddings-azopenai = adds support for Azure OpenAI Embeddings, requires Azure OpenAI inference endpoints
-- vector-stores-qdrant: adds support for Qdrant vector store
-- vector-stores-chroma: adds support for Chroma DB vector store
-- vector-stores-postgres: adds support for Postgres vector store
+Where `<extra>` can be any of the following options described below.
+### Available Modules
+You need to choose one option per category (LLM, Embeddings, Vector Stores, UI). Below are the tables listing the available options for each category; a combined example follows the tables.
+#### LLM
| **Option** | **Description** | **Extra** |
|------------|-----------------|-----------|
| **ollama** | Adds support for Ollama LLM, requires Ollama running locally | llms-ollama |
| llama-cpp | Adds support for local LLM using LlamaCPP | llms-llama-cpp |
| sagemaker | Adds support for Amazon Sagemaker LLM, requires Sagemaker endpoints | llms-sagemaker |
| openai | Adds support for OpenAI LLM, requires OpenAI API key | llms-openai |
| openailike | Adds support for 3rd party LLM providers compatible with OpenAI's API | llms-openai-like |
| azopenai | Adds support for Azure OpenAI LLM, requires Azure endpoints | llms-azopenai |
| gemini | Adds support for Gemini LLM, requires Gemini API key | llms-gemini |
+#### Embeddings
| **Option** | **Description** | **Extra** |
|------------|-----------------|-----------|
| **ollama** | Adds support for Ollama Embeddings, requires Ollama running locally | embeddings-ollama |
| huggingface | Adds support for local Embeddings using HuggingFace | embeddings-huggingface |
| openai | Adds support for OpenAI Embeddings, requires OpenAI API key | embeddings-openai |
| sagemaker | Adds support for Amazon Sagemaker Embeddings, requires Sagemaker endpoints | embeddings-sagemaker |
| azopenai | Adds support for Azure OpenAI Embeddings, requires Azure endpoints | embeddings-azopenai |
| gemini | Adds support for Gemini Embeddings, requires Gemini API key | embeddings-gemini |
+#### Vector Stores
| **Option** | **Description** | **Extra** |
|------------|-----------------|-----------|
| **qdrant** | Adds support for Qdrant vector store | vector-stores-qdrant |
| milvus | Adds support for Milvus vector store | vector-stores-milvus |
| chroma | Adds support for Chroma DB vector store | vector-stores-chroma |
| postgres | Adds support for Postgres vector store | vector-stores-postgres |
| clickhouse | Adds support for Clickhouse vector store | vector-stores-clickhouse |
+#### UI
| **Option** | **Description** | **Extra** |
|------------|-----------------|-----------|
| Gradio | Adds support for UI using Gradio | ui |
<Callout intent = "warning">
A working **Gradio UI client** is provided to test the API, together with a set of useful tools such as bulk
model download script, ingestion script, documents folder watch, etc. Please refer to the [UI alternatives](/manual/user-interface/alternatives) page for more UI alternatives.
</Callout>
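For instance, picking the recommended option from each table above (Gradio UI, Ollama for both the LLM and the Embeddings, Qdrant as the vector store) produces the combined install command that the Ollama-powered setup below also uses:

```bash
poetry install --extras "ui llms-ollama embeddings-ollama vector-stores-qdrant"
```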
## Recommended Setups
@@ -81,6 +120,8 @@ set PGPT_PROFILES=ollama
make run
```
Refer to the [troubleshooting](./troubleshooting) section for specific issues you might encounter.
### Local, Ollama-powered setup - RECOMMENDED
**The easiest way to run PrivateGPT fully locally** is to depend on Ollama for the LLM. Ollama makes local LLMs and Embeddings super easy to install and use, abstracting the complexity of GPU support. It's the recommended setup for local development.
@@ -89,18 +130,22 @@ Go to [ollama.ai](https://ollama.ai/) and follow the instructions to install Oll
After the installation, make sure the Ollama desktop app is closed.
-Install the models to be used, the default settings-ollama.yaml is configured to user `mistral 7b` LLM (~4GB) and `nomic-embed-text` Embeddings (~275MB). Therefore:
+Now, start the Ollama service (it will start a local inference server, serving both the LLM and the Embeddings):
+```bash
+ollama serve
+```
+Install the models to be used; the default settings-ollama.yaml is configured to use the mistral 7b LLM (~4GB) and nomic-embed-text Embeddings (~275MB).
+By default, PGPT will automatically pull models as needed. This behavior can be changed by modifying the `ollama.autopull_models` property.
+In any case, if you want to manually pull models, run the following commands:
```bash
ollama pull mistral
ollama pull nomic-embed-text
```
-Now, start Ollama service (it will start a local inference server, serving both the LLM and the Embeddings):
-```bash
-ollama serve
-```
Once done, on a different terminal, you can install PrivateGPT with the following command:
```bash
poetry install --extras "ui llms-ollama embeddings-ollama vector-stores-qdrant"
```
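With the extras installed, the server can then be launched against the Ollama profile, reusing the `PGPT_PROFILES` pattern shown at the top of this section:

```bash
PGPT_PROFILES=ollama make run
```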


@@ -0,0 +1,31 @@
# Downloading Gated and Private Models
Many models are gated or private, requiring special access to use them. Follow these steps to gain access and set up your environment for using these models.
## Accessing Gated Models
1. **Request Access:**
   Follow the instructions provided [here](https://huggingface.co/docs/hub/en/models-gated) to request access to the gated model.
2. **Generate a Token:**
   Once you have access, generate a token by following the instructions [here](https://huggingface.co/docs/hub/en/security-tokens).
3. **Set the Token:**
   Add the generated token to your `settings.yaml` file:
   ```yaml
   huggingface:
     access_token: <your-token>
   ```
   Alternatively, set the `HF_TOKEN` environment variable:
   ```bash
   export HF_TOKEN=<your-token>
   ```
# Tokenizer Setup
PrivateGPT uses the `AutoTokenizer` library to tokenize input text accurately. It connects to HuggingFace's API to download the appropriate tokenizer for the specified model.
## Configuring the Tokenizer
1. **Specify the Model:**
   In your `settings.yaml` file, specify the model you want to use:
   ```yaml
   llm:
     tokenizer: mistralai/Mistral-7B-Instruct-v0.2
   ```
2. **Set Access Token for Gated Models:**
   If you are using a gated model, ensure the `access_token` is set as mentioned in the previous section.
This configuration ensures that PrivateGPT can download and use the correct tokenizer for the model you are working with.
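To make the tokenizer step concrete, here is a minimal sketch of what that download amounts to with the `transformers` library. The model id mirrors the example above; reading the token from `HF_TOKEN` is an illustrative assumption, not PrivateGPT's exact internal code:

```python
import os

from transformers import AutoTokenizer

# Illustrative sketch: fetch the tokenizer for the model configured in
# settings.yaml, authenticating via HF_TOKEN so gated models are accessible.
tokenizer = AutoTokenizer.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",
    token=os.environ.get("HF_TOKEN"),  # None is acceptable for non-gated models
)

print(tokenizer.encode("Hello, PrivateGPT!"))
```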


@@ -93,7 +93,7 @@ time PGPT_PROFILES=mock python ./scripts/ingest_folder.py ~/my-dir/to-ingest/
## Supported file formats
-privateGPT by default supports all the file formats that contains clear text (for example, `.txt` files, `.html`, etc.).
+PrivateGPT by default supports all the file formats that contain clear text (for example, `.txt` files, `.html`, etc.).
However, these text-based file formats are only considered as text files, and are not pre-processed in any other way.
It also supports the following file formats:
@@ -115,11 +115,15 @@ It also supports the following file formats:
* `.ipynb`
* `.json`
-**Please note the following nuance**: while `privateGPT` supports these file formats, it **might** require additional
+<Callout intent = "info">
+While `PrivateGPT` supports these file formats, it **might** require additional
dependencies to be installed in your python's virtual environment.
-For example, if you try to ingest `.epub` files, `privateGPT` might fail to do it, and will instead display an
+For example, if you try to ingest `.epub` files, `PrivateGPT` might fail to do it, and will instead display an
explanatory error asking you to download the necessary dependencies to install this file format.
</Callout>
<Callout intent = "info">
**Other file formats might work**, but they will be considered as plain text
files (in other words, they will be ingested as `.txt` files).
</Callout>


@@ -193,3 +193,42 @@ or
When the server is started it will print a log *Application startup complete*.
Navigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API.
### Using IPEX-LLM
For a fully private setup on Intel GPUs (such as a local PC with an iGPU, or discrete GPUs like Arc, Flex, and Max), you can use [IPEX-LLM](https://github.com/intel-analytics/ipex-llm).
To deploy Ollama and pull models using IPEX-LLM, please refer to [this guide](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/ollama_quickstart.html). Then, follow the same steps outlined in the [Using Ollama](#using-ollama) section to create a `settings-ollama.yaml` profile and run the private-GPT server.
### Using Gemini
If you cannot run a local model (because you don't have a GPU, for example) or for testing purposes, you may
decide to run PrivateGPT using Gemini as the LLM and Embeddings model. In addition, you will benefit from
multimodal inputs, such as text and images, in a very large contextual window.
In order to do so, create a profile `settings-gemini.yaml` with the following contents:
```yaml
llm:
  mode: gemini
embedding:
  mode: gemini
gemini:
  api_key: <your_gemini_api_key> # You could skip this configuration and use the GEMINI_API_KEY env var instead
  model: <gemini_model_to_use> # Optional model to use. Default is "models/gemini-pro"
  embedding_model: <gemini_embeddings_to_use> # Optional model to use. Default is "models/embedding-001"
```
And run PrivateGPT loading that profile you just created:
`PGPT_PROFILES=gemini make run`
or
`PGPT_PROFILES=gemini poetry run python -m private_gpt`
When the server is started it will print a log *Application startup complete*.
Navigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API.


@@ -3,8 +3,8 @@
The configuration of your private GPT server is done thanks to `settings` files (more precisely `settings.yaml`).
These text files are written using the [YAML](https://en.wikipedia.org/wiki/YAML) syntax.
-While privateGPT is distributing safe and universal configuration files, you might want to quickly customize your
-privateGPT, and this can be done using the `settings` files.
+While PrivateGPT is distributing safe and universal configuration files, you might want to quickly customize your
+PrivateGPT, and this can be done using the `settings` files.
This project is defining the concept of **profiles** (or configuration profiles).
This mechanism, using your environment variables, is giving you the ability to easily switch between
@@ -30,15 +30,20 @@ For example, on **linux and macOS**, this gives:
export PGPT_PROFILES=my_profile_name_here
```
-Windows Powershell(s) have a different syntax, one of them being:
+Windows Command Prompt (cmd) has a different syntax:
```shell
set PGPT_PROFILES=my_profile_name_here
```
Windows PowerShell has a different syntax:
```shell
$env:PGPT_PROFILES="my_profile_name_here"
```
If the above is not working, you might want to try other ways to set an env variable in your Windows terminal.
---
-Once you've set this environment variable to the desired profile, you can simply launch your privateGPT,
+Once you've set this environment variable to the desired profile, you can simply launch your PrivateGPT,
and it will run using your profile on top of the default configuration.
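To illustrate the overlay behavior, a hypothetical `settings-my_profile_name_here.yaml` only needs to declare the keys it overrides; the profile name and the `llm.mode` value below are assumptions for the example, and every key not listed keeps its value from `settings.yaml`:

```yaml
# settings-my_profile_name_here.yaml — hypothetical profile
# Only the keys listed here override settings.yaml; all other
# configuration keeps its default value.
llm:
  mode: ollama
```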
## Reference


@@ -1,7 +1,7 @@
## Vectorstores
-PrivateGPT supports [Qdrant](https://qdrant.tech/), [Chroma](https://www.trychroma.com/) and [PGVector](https://github.com/pgvector/pgvector) as vectorstore providers. Qdrant being the default.
+PrivateGPT supports [Qdrant](https://qdrant.tech/), [Milvus](https://milvus.io/), [Chroma](https://www.trychroma.com/), [PGVector](https://github.com/pgvector/pgvector) and [ClickHouse](https://github.com/ClickHouse/ClickHouse) as vectorstore providers, Qdrant being the default.
-In order to select one or the other, set the `vectorstore.database` property in the `settings.yaml` file to `qdrant`, `chroma` or `postgres`.
+In order to select one of them, set the `vectorstore.database` property in the `settings.yaml` file to `qdrant`, `milvus`, `chroma`, `postgres` or `clickhouse`.
```yaml
vectorstore:
@@ -39,6 +39,24 @@ qdrant:
  path: local_data/private_gpt/qdrant
```
### Milvus configuration
To enable Milvus, set the `vectorstore.database` property in the `settings.yaml` file to `milvus` and install the `milvus` extra.
```bash
poetry install --extras vector-stores-milvus
```
The available configuration options are:
| Field | Description |
|-----------------|-------------|
| uri | Default is set to "local_data/private_gpt/milvus/milvus_local.db" as a local file. You can also set up a more performant Milvus server on Docker or K8s, e.g. http://localhost:19530, as your uri. To use Zilliz Cloud, set the uri and token to the Endpoint and API key from Zilliz Cloud. |
| token | Token to pair with a Milvus server on Docker or K8s, or the Zilliz Cloud API key. |
| collection_name | The name of the collection. Default is "milvus_db". |
| overwrite | Whether to overwrite the data in the collection if it exists. Default is True. |
To obtain a local setup (disk-based database) without running a Milvus server, configure the uri value in settings.yaml to store data in local_data/private_gpt/milvus/milvus_local.db.
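As a sketch only, the options in the table might map onto `settings.yaml` like this for the local, disk-based mode described above; the values mirror the documented defaults and are illustrative, not authoritative:

```yaml
vectorstore:
  database: milvus
milvus:
  uri: local_data/private_gpt/milvus/milvus_local.db  # local file, no Milvus server needed
  collection_name: milvus_db
  overwrite: true
```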
### Chroma configuration
To enable Chroma, set the `vectorstore.database` property in the `settings.yaml` file to `chroma` and install the `chroma` extra.
@@ -101,3 +119,69 @@ Indexes:
postgres=#
```
The dimensions of the embeddings columns will be set based on the `embedding.embed_dim` value. If the embedding model changes this table may need to be dropped and recreated to avoid a dimension mismatch.
### ClickHouse
To utilize ClickHouse as the vector store, a [ClickHouse](https://github.com/ClickHouse/ClickHouse) database must be employed.
To enable ClickHouse, set the `vectorstore.database` property in the `settings.yaml` file to `clickhouse` and install the `vector-stores-clickhouse` extra.
```bash
poetry install --extras vector-stores-clickhouse
```
ClickHouse settings can be configured by setting values to the `clickhouse` property in the `settings.yaml` file.
The available configuration options are:
| Field | Description |
|----------------------|----------------------------------------------------------------|
| **host** | The server hosting the ClickHouse database. Default is `localhost` |
| **port** | The port on which the ClickHouse database is accessible. Default is `8123` |
| **username** | The username for database access. Default is `default` |
| **password** | The password for database access. (Optional) |
| **database** | The specific database to connect to. Default is `__default__` |
| **secure** | Use https/TLS for secure connection to the server. Default is `false` |
| **interface** | The protocol used for the connection, either 'http' or 'https'. (Optional) |
| **settings** | Specific ClickHouse server settings to be used with the session. (Optional) |
| **connect_timeout** | Timeout in seconds for establishing a connection. (Optional) |
| **send_receive_timeout** | Read timeout in seconds for http connection. (Optional) |
| **verify** | Verify the server certificate in secure/https mode. (Optional) |
| **ca_cert** | Path to Certificate Authority root certificate (.pem format). (Optional) |
| **client_cert** | Path to TLS Client certificate (.pem format). (Optional) |
| **client_cert_key** | Path to the private key for the TLS Client certificate. (Optional) |
| **http_proxy** | HTTP proxy address. (Optional) |
| **https_proxy** | HTTPS proxy address. (Optional) |
| **server_host_name** | Server host name to be checked against the TLS certificate. (Optional) |
For example:
```yaml
vectorstore:
database: clickhouse
clickhouse:
host: localhost
port: 8443
username: admin
password: <PASSWORD>
database: embeddings
secure: false
```
The following table will be created in the database:
```
clickhouse-client
:) \d embeddings.llama_index
Table "llama_index"
№ | name | type | default_type | default_expression | comment | codec_expression | ttl_expression
----|-----------|----------------------------------------------|--------------|--------------------|---------|------------------|---------------
1 | id | String | | | | |
2 | doc_id | String | | | | |
3 | text | String | | | | |
4 | vector | Array(Float32) | | | | |
5 | node_info | Tuple(start Nullable(UInt64), end Nullable(UInt64)) | | | | |
6 | metadata | String | | | | |
clickhouse-client
```
The dimensions of the embeddings columns will be set based on the `embedding.embed_dim` value. If the embedding model changes, this table may need to be dropped and recreated to avoid a dimension mismatch.


@@ -1,8 +1,16 @@
PrivateGPT provides an **API** containing all the building blocks required to
build **private, context-aware AI applications**.
<Callout intent = "tip">
If you are looking for an **enterprise-ready, fully private AI workspace**
check out [Zylon's website](https://zylon.ai) or [request a demo](https://cal.com/zylon/demo?source=pgpt-docs).
Crafted by the team behind PrivateGPT, Zylon is a best-in-class AI collaborative
workspace that can be easily deployed on-premise (data center, bare metal...) or in your private cloud (AWS, GCP, Azure...).
</Callout>
The API follows and extends OpenAI API standard, and supports both normal and streaming responses.
That means that, if you can use OpenAI API in one of your tools, you can use your own PrivateGPT API instead,
-with no code changes, **and for free** if you are running privateGPT in a `local` setup.
+with no code changes, **and for free** if you are running PrivateGPT in a `local` setup.
Get started by understanding the [Main Concepts and Installation](/installation) and then dive into the [API Reference](/api-reference).
@@ -32,9 +40,3 @@ Get started by understanding the [Main Concepts and Installation](/installation)
</Cards>
<br />
-<Callout intent = "info">
-A working **Gradio UI client** is provided to test the API, together with a set of useful tools such as bulk
-model download script, ingestion script, documents folder watch, etc.
-</Callout>


@@ -1,6 +1,7 @@
# List of working LLM
**Do you have any working combination of LLM and embeddings?**
Please open a PR to add it to the list, and come on our Discord to tell us about it!
## Prompt style


@@ -0,0 +1,21 @@
This page aims to present different user interface (UI) alternatives for integrating and using PrivateGPT. These alternatives range from demo applications to fully customizable UI setups that can be adapted to your specific needs.
**Do you have any working demo project using PrivateGPT?**
Please open a PR to add it to the list, and come on our Discord to tell us about it!
<Callout intent = "note">
WIP: This page provides an overview of one of the UI alternatives available for PrivateGPT. More alternatives will be added to this page as they become available.
</Callout>
## [PrivateGPT SDK Demo App](https://github.com/frgarciames/privategpt-react)
The PrivateGPT SDK demo app is a robust starting point for developers looking to integrate and customize PrivateGPT in their applications. Leveraging modern technologies like Tailwind, shadcn/ui, and Biomejs, it provides a smooth development experience and a highly customizable user interface. Refer to the [repository](https://github.com/frgarciames/privategpt-react) for more details and to get started.
**Tech Stack:**
- **Tailwind:** A utility-first CSS framework for rapid UI development.
- **shadcn/ui:** A set of high-quality, customizable UI components.
- **PrivateGPT Web SDK:** The core SDK for interacting with PrivateGPT.
- **Biomejs formatter/linter:** A tool for maintaining code quality and consistency.


@@ -2,7 +2,12 @@
Gradio UI is a ready-to-use way of testing most of PrivateGPT API functionalities.
-![Gradio PrivateGPT](https://lh3.googleusercontent.com/drive-viewer/AK7aPaD_Hc-A8A9ooMe-hPgm_eImgsbxAjb__8nFYj8b_WwzvL1Gy90oAnp1DfhPaN6yGiEHCOXs0r77W1bYHtPzlVwbV7fMsA=s1600)
+![Gradio PrivateGPT](https://github.com/zylon-ai/private-gpt/raw/main/fern/docs/assets/ui.png?raw=true)
<Callout intent = "warning">
A working **Gradio UI client** is provided to test the API, together with a set of useful tools such as bulk
model download script, ingestion script, documents folder watch, etc. Please refer to the [UI alternatives](/manual/user-interface/alternatives) page for more UI alternatives.
</Callout>
### Execution Modes


@@ -1,4 +1,4 @@
{
  "organization": "privategpt",
- "version": "0.19.10"
+ "version": "0.31.17"
}

poetry.lock (generated, 675 lines changed)

@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "aiofiles"
@@ -763,6 +763,96 @@ files = [
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "clickhouse-connect"
version = "0.7.15"
description = "ClickHouse Database Core Driver for Python, Pandas, and Superset"
optional = true
python-versions = "~=3.8"
files = [
{file = "clickhouse-connect-0.7.15.tar.gz", hash = "sha256:f6ebd6dda6a5fff774e3563cd5ed99a6a21bbc5f52847329c72136e6b3bf4cc5"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a0eda2572ee8abf508458f2834a691bfa27d040024257f93e4ea3500d4fe99b1"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cb8183e96669c615a6e553c22beb30a0dc0f59602eaf20a0e1cafaaf048dcd25"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:220f4d97cf7716dff956137c42e7ae075a7b5fa9a841bb9b3641f8c48c21e52e"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8753b90d3e77975f12c17af4b0cd7d67c15d02915d7f9ae04454d2f1d74e34d6"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c2028abf2d73038327732ebfa0c80bb6d74d8846d408629f45b375a3975bc63"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7010ae77279e58ef7872f3395dc8476071e32fd1ae172bedf8ff325b0fdb2174"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4608c18a650419fb1c1ea6df2fce64688a395b5cbf4c53b4fac69d2a43f2df71"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:af030536a656e3fac746a3145c2bff6d0b3feb86c16f2ee731f5a120f5ec084d"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-win32.whl", hash = "sha256:6f59274a4178eb4b6d3e792328eed78da8e715b793fc8f3392cda6b03b89f134"},
{file = "clickhouse_connect-0.7.15-cp310-cp310-win_amd64.whl", hash = "sha256:fa14c8effcd00ca88bcd286af7709e5a4cccf449c5a088a59718de3a9b3284d4"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9020dfa2e4a6230e96db5e36018bfb6bd9c7adcdf69878d9a2f21574c51c981d"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:641a8d8d67ec45917169c2bbb3e87318c162681b9f998fc229125b0274fe5fdd"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4ee3c75dbdcf02ad6dd445bd32c28f19473862b82bb5ca4563235e4d27f33e6"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d8852b52c096f4e379c55aec68345a824f10391a7539f95959e613e97aae765"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9daf4c918f1b2795c403f9e3068bc157d4cf4fd80e740866cb47f6e49958822f"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8b0f151be2d155ecc7a69b3ad7d89a79ab4397846268dec1a38e4184ee1e9ac6"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:92c0cc4c4cd0abcd2678b80efe6aaa0671b34343038c26cc94dec3cb515d0da7"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b3277a65e95158da052db18ffd212d82359907edad759319afaff0726df258d"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-win32.whl", hash = "sha256:d26142b45a0c0e25b28a61d2084d5b9b85e7c729e72c04ab7a346224500b5254"},
{file = "clickhouse_connect-0.7.15-cp311-cp311-win_amd64.whl", hash = "sha256:a7266528d22001dcfd706c619eeddafa025145b5e3cb4bba99ab0cc35e8b5a0d"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:55b25f7bb2893077d7c607c64dcef0e34fa7807f911a6dd12545a4283bd4cd56"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:524989f3a58da753291b579a2b2b5aa7522531f7b28aecb271b2fa9751b52e11"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a7dd4a139a70cc08bd7f0f787267cc79ca861bff0bfd7cc95e0caf0d8941463"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20f990bedaca13f7acc3772376657f8ec921779caf02f6e69f06c9eee326f9ae"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ba208b389a45eafc436ffc40e749eed27c9c09812693c347993608175d93369"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e333185f2e0e417bf0c98d98fd2dbc5bbfad1f58290fb7f3d41eaa879b7bdb55"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:207c765cf77ac4ffdd1a71c83b2a8773fc6be74c4bd75ada2e51a258155e7e03"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:259ceb14a20f5b1e45f3cef438bb2d342a0c542dbaeef3da3f248313277a1ad7"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-win32.whl", hash = "sha256:cee0aa53574801ea6901bf1b69a06475c943b2a16cab8d5aec6a027d185592e8"},
{file = "clickhouse_connect-0.7.15-cp312-cp312-win_amd64.whl", hash = "sha256:531d6705339568995895bf8bf900d720a8ef715825b1f47611860ccba55c256c"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d192dc16aaa166d73c6bf7ef98d9a8fd4fe7a470d864a778ef2b5be284956145"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:54e0891d2879d8956c3aaf56ca7f26712c5200b2dba71e7875f453362618a1a0"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f0c6a3342763b7e0783dfc9a12c5015ab9037a1a1a799c0a16a98eabbf9c850"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50b6a4c421188abe1216f2f7fd76811525343735df80ad40b8227beacef788c4"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db3b5b5205bb58bc7b107c0dd67e1b5c6d3e8a0ac61c7e682087cf03c39d2afc"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:24de7a82e063d78a97232a19cf0f6c91120b02594eede0a999571466f42e16cd"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:50a24098ab91baa2a599ab2cb31d4b5ffc56ac43f0e7b4c201c6e5dafd22112f"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa77ebfed3f3576cc023bf7b0b643da6cf62aa7919c1cd0d0685f5eeef55a3f9"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-win32.whl", hash = "sha256:78c963fc9cf8fc86cb68e156819bef617ec2fb08758bb1f3a17dde78d7ae06fa"},
{file = "clickhouse_connect-0.7.15-cp38-cp38-win_amd64.whl", hash = "sha256:4a8caba99b4175e1fafd3c9035da1332d1a902c6b2067c5641111cc5337ba524"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5552eaa7e0f09d165df5851d0ecc7d3a4c66607630328befa0fbe5068f2d7008"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:02712f2aa16e83ad5b38c5d8952a3f8feb76c71e31edc57f4dd4ef55619f78a8"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69f8bf1c7168425f04e068f8644c45e3cabc3bae464db83eedd13dab7ce25c7e"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:663b9c390caae86456e41c24a71542b5f68b00d9fd6b30764189433012821009"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15f02b953e4e9efa085d37eb0c8ac28b5935e0f9dc3c46c7d6bb5bdd8a70dc5e"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:85da6a61c96b8866fd4e2e96c54ff371c37a66677370e72db3bbcab108b3c5e1"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e6fd30802d5078065bdb5eab42969476d3064d9293a29e26863eb50997b0f509"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:62809176d70e2c328a8c6db1beffcc1296bc4fcd3fc047faa9e5002e9ba0e1ff"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-win32.whl", hash = "sha256:001f2ee736d1bdbf742357c7cf6aea72cb377c8e8f9e47d9470d8620f4166124"},
{file = "clickhouse_connect-0.7.15-cp39-cp39-win_amd64.whl", hash = "sha256:3c1d2470ec8ba017d28deb09725f2b1da86ffac18456df0189daeca4a8960346"},
{file = "clickhouse_connect-0.7.15-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0f23d7978a80f3b5b4f25c8ed14f88fecee03be1377bd5517f04403a71b37c44"},
{file = "clickhouse_connect-0.7.15-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe640ce36958ee6fe59368c2312d032454a8826eb331b19dfdc9bb27c6f4ac27"},
{file = "clickhouse_connect-0.7.15-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8d3e9ed014d03b61e4613617fc2d4780a1116e66baa413c9d60aa69c525a07b"},
{file = "clickhouse_connect-0.7.15-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f14a6496c3f3e5b712f228e285b571bbecb3dcceff61eee28fcfc0b62aa124f"},
{file = "clickhouse_connect-0.7.15-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:262fd092b83cfbb1f3034bfe718a27d683af4b988105acf77f548d7766655c16"},
{file = "clickhouse_connect-0.7.15-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:af011dc852fb04bcb20c602758c54664653490b0af6509194fafe1a59203c319"},
{file = "clickhouse_connect-0.7.15-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f4d5fc00175fa41a85c81ccb97ab847cf8ba831d0ad737e6abc8364528c7aa5"},
{file = "clickhouse_connect-0.7.15-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8eec9684d0eb3eab7e37aeafe08e998b9a7f4311822d4f4b423fad0026e610cb"},
{file = "clickhouse_connect-0.7.15-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8567e4d2e36cb4a40d255bbf9d0408da05f47360f4727d493b88a300de94571c"},
{file = "clickhouse_connect-0.7.15-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ab6571b625a52dea1458b0387fde1edce7613867d98f2144aad647bc10ebf578"},
{file = "clickhouse_connect-0.7.15-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bf8950c96e7960072b227a4d338a73d1f3f72eecc572a2eacd17450bad7bce61"},
{file = "clickhouse_connect-0.7.15-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a0a2ef9c1a40842196257d6cefb4be3b799efe3340293e0996f2c3eaa268e24"},
{file = "clickhouse_connect-0.7.15-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5c19a3bc226ef861e344e5cf9ed10a71359d3a51abb2a520d416d84887be6a3"},
{file = "clickhouse_connect-0.7.15-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e5b08ee2739ba551bf9f2f1c641175a9d2b32c7d322b1130b6d0a4cf478cf60"},
{file = "clickhouse_connect-0.7.15-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ee7596f9b1541342e907b11daf65107ad5cc1a95de06428b8389fe7f9095554"},
]
[package.dependencies]
certifi = "*"
lz4 = "*"
pytz = "*"
urllib3 = ">=1.26"
zstandard = "*"
[package.extras]
arrow = ["pyarrow"]
numpy = ["numpy"]
orjson = ["orjson"]
pandas = ["pandas"]
sqlalchemy = ["sqlalchemy (>1.3.21,<2.0)"]
tzlocal = ["tzlocal (>=4.0)"]
[[package]]
name = "colorama"
version = "0.4.6"
@@ -1092,6 +1182,27 @@ files = [
dnspython = ">=2.0.0"
idna = ">=2.0.0"
[[package]]
name = "environs"
version = "9.5.0"
description = "simplified environment variable parsing"
optional = true
python-versions = ">=3.6"
files = [
{file = "environs-9.5.0-py2.py3-none-any.whl", hash = "sha256:1e549569a3de49c05f856f40bce86979e7d5ffbbc4398e7f338574c220189124"},
{file = "environs-9.5.0.tar.gz", hash = "sha256:a76307b36fbe856bdca7ee9161e6c466fd7fcffc297109a118c59b54e27e30c9"},
]
[package.dependencies]
marshmallow = ">=3.0.0"
python-dotenv = "*"
[package.extras]
dev = ["dj-database-url", "dj-email-url", "django-cache-url", "flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)", "pytest", "tox"]
django = ["dj-database-url", "dj-email-url", "django-cache-url"]
lint = ["flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)"]
tests = ["dj-database-url", "dj-email-url", "django-cache-url", "pytest"]
[[package]]
name = "fastapi"
version = "0.111.0"
@@ -1142,13 +1253,18 @@ standard = ["fastapi", "uvicorn[standard] (>=0.15.0)"]
[[package]]
name = "ffmpy"
version = "0.3.2"
description = "A simple Python wrapper for ffmpeg"
optional = true
python-versions = "*"
files = []
develop = false
[package.source]
type = "git"
url = "https://github.com/EuDs63/ffmpy.git"
reference = "HEAD"
resolved_reference = "333a19ee4d21f32537c0508aa1942ef1aa7afe24"
[[package]]
name = "filelock"
@@ -1363,6 +1479,66 @@ smb = ["smbprotocol"]
ssh = ["paramiko"]
tqdm = ["tqdm"]
[[package]]
name = "google-ai-generativelanguage"
version = "0.6.4"
description = "Google Ai Generativelanguage API client library"
optional = true
python-versions = ">=3.7"
files = [
{file = "google-ai-generativelanguage-0.6.4.tar.gz", hash = "sha256:1750848c12af96cb24ae1c3dd05e4bfe24867dc4577009ed03e1042d8421e874"},
{file = "google_ai_generativelanguage-0.6.4-py3-none-any.whl", hash = "sha256:730e471aa549797118fb1c88421ba1957741433ada575cf5dd08d3aebf903ab1"},
]
[package.dependencies]
google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]}
google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev"
proto-plus = ">=1.22.3,<2.0.0dev"
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
[[package]]
name = "google-api-core"
version = "2.19.1"
description = "Google API client core library"
optional = true
python-versions = ">=3.7"
files = [
{file = "google-api-core-2.19.1.tar.gz", hash = "sha256:f4695f1e3650b316a795108a76a1c416e6afb036199d1c1f1f110916df479ffd"},
{file = "google_api_core-2.19.1-py3-none-any.whl", hash = "sha256:f12a9b8309b5e21d92483bbd47ce2c445861ec7d269ef6784ecc0ea8c1fa6125"},
]
[package.dependencies]
google-auth = ">=2.14.1,<3.0.dev0"
googleapis-common-protos = ">=1.56.2,<2.0.dev0"
grpcio = {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}
grpcio-status = {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}
proto-plus = ">=1.22.3,<2.0.0dev"
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0"
requests = ">=2.18.0,<3.0.0.dev0"
[package.extras]
grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"]
grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
[[package]]
name = "google-api-python-client"
version = "2.136.0"
description = "Google API Client Library for Python"
optional = true
python-versions = ">=3.7"
files = [
{file = "google-api-python-client-2.136.0.tar.gz", hash = "sha256:161c722c8864e7ed39393e2b7eea76ef4e1c933a6a59f9d7c70409b6635f225d"},
{file = "google_api_python_client-2.136.0-py2.py3-none-any.whl", hash = "sha256:5a554c8b5edf0a609b905d89d7ced82e8f6ac31da1e4d8d5684ef63dbc0e49f5"},
]
[package.dependencies]
google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0"
google-auth = ">=1.32.0,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0.dev0"
google-auth-httplib2 = ">=0.2.0,<1.0.0"
httplib2 = ">=0.19.0,<1.dev0"
uritemplate = ">=3.0.1,<5"
[[package]]
name = "google-auth"
version = "2.25.2"
@@ -1386,6 +1562,44 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"]
reauth = ["pyu2f (>=0.1.5)"]
requests = ["requests (>=2.20.0,<3.0.0.dev0)"]
[[package]]
name = "google-auth-httplib2"
version = "0.2.0"
description = "Google Authentication Library: httplib2 transport"
optional = true
python-versions = "*"
files = [
{file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"},
{file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"},
]
[package.dependencies]
google-auth = "*"
httplib2 = ">=0.19.0"
[[package]]
name = "google-generativeai"
version = "0.5.4"
description = "Google Generative AI High level API client library and tools."
optional = true
python-versions = ">=3.9"
files = [
{file = "google_generativeai-0.5.4-py3-none-any.whl", hash = "sha256:036d63ee35e7c8aedceda4f81c390a5102808af09ff3a6e57e27ed0be0708f3c"},
]
[package.dependencies]
google-ai-generativelanguage = "0.6.4"
google-api-core = "*"
google-api-python-client = "*"
google-auth = ">=2.15.0"
protobuf = "*"
pydantic = "*"
tqdm = "*"
typing-extensions = "*"
[package.extras]
dev = ["Pillow", "absl-py", "black", "ipython", "nose2", "pandas", "pytype", "pyyaml"]
[[package]]
name = "googleapis-common-protos"
version = "1.62.0"
@@ -1602,6 +1816,22 @@ files = [
[package.extras]
protobuf = ["grpcio-tools (>=1.60.0)"]
[[package]]
name = "grpcio-status"
version = "1.60.0"
description = "Status proto mapping for gRPC"
optional = true
python-versions = ">=3.6"
files = [
{file = "grpcio-status-1.60.0.tar.gz", hash = "sha256:f10e0b6db3adc0fdc244b71962814ee982996ef06186446b5695b9fa635aa1ab"},
{file = "grpcio_status-1.60.0-py3-none-any.whl", hash = "sha256:7d383fa36e59c1e61d380d91350badd4d12ac56e4de2c2b831b050362c3c572e"},
]
[package.dependencies]
googleapis-common-protos = ">=1.5.5"
grpcio = ">=1.60.0"
protobuf = ">=4.21.6"
[[package]]
name = "grpcio-tools"
version = "1.60.0"
@@ -1728,6 +1958,20 @@ http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.23.0)"]
[[package]]
name = "httplib2"
version = "0.22.0"
description = "A comprehensive HTTP client library."
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"},
{file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"},
]
[package.dependencies]
pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""}
[[package]]
name = "httptools"
version = "0.6.1"
@@ -1778,13 +2022,13 @@ test = ["Cython (>=0.29.24,<0.30.0)"]
[[package]]
name = "httpx"
version = "0.27.0"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
{file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
]
[package.dependencies]
@@ -2273,6 +2517,21 @@ llama-index-core = ">=0.10.11.post1,<0.11.0"
llama-index-embeddings-openai = ">=0.1.3,<0.2.0"
llama-index-llms-azure-openai = ">=0.1.3,<0.2.0"
[[package]]
name = "llama-index-embeddings-gemini"
version = "0.1.8"
description = "llama-index embeddings gemini integration"
optional = true
python-versions = "<4.0,>=3.9"
files = [
{file = "llama_index_embeddings_gemini-0.1.8-py3-none-any.whl", hash = "sha256:b0afd99706cec2cf2f0c5e1f70675170777e82e96a7459f231da07c79408b469"},
{file = "llama_index_embeddings_gemini-0.1.8.tar.gz", hash = "sha256:ad02b23ea667c95607da1e534d5579de461895d1944560115a3601ac16f87793"},
]
[package.dependencies]
google-generativeai = ">=0.5.2,<0.6.0"
llama-index-core = ">=0.10.11.post1,<0.11.0"
[[package]]
name = "llama-index-embeddings-huggingface"
version = "0.2.2"
@@ -2334,6 +2593,21 @@ httpx = "*"
llama-index-core = ">=0.10.11.post1,<0.11.0"
llama-index-llms-openai = ">=0.1.1,<0.2.0"
[[package]]
name = "llama-index-llms-gemini"
version = "0.1.11"
description = "llama-index llms gemini integration"
optional = true
python-versions = "<4.0,>=3.9"
files = [
{file = "llama_index_llms_gemini-0.1.11-py3-none-any.whl", hash = "sha256:0031853d938875ba2975c2c92c40d6ac18b6dde1f0103cdee10bfc87d128fb34"},
]
[package.dependencies]
google-generativeai = ">=0.5.2,<0.6.0"
llama-index-core = ">=0.10.11.post1,<0.11.0"
pillow = ">=10.2.0,<11.0.0"
[[package]]
name = "llama-index-llms-llama-cpp"
version = "0.1.4"
@@ -2351,17 +2625,18 @@ llama-index-core = ">=0.10.1,<0.11.0"
[[package]]
name = "llama-index-llms-ollama"
version = "0.2.2"
description = "llama-index llms ollama integration"
optional = true
python-versions = "<4.0,>=3.8.1"
files = [
{file = "llama_index_llms_ollama-0.2.2-py3-none-any.whl", hash = "sha256:c224d7c17d641045bc9b6a6681dab434c1c421af0bacb5825eea444fefd8ed78"},
{file = "llama_index_llms_ollama-0.2.2.tar.gz", hash = "sha256:0c7f192cb8b768707bd5154b97e2a41284732d62070eb76190dee125e95245ea"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
ollama = ">=0.3.0"
[[package]]
name = "llama-index-llms-openai"
@@ -2472,6 +2747,36 @@ files = [
chromadb = ">=0.4.0,<0.6.0"
llama-index-core = ">=0.10.1,<0.11.0"
[[package]]
name = "llama-index-vector-stores-clickhouse"
version = "0.1.3"
description = "llama-index vector_stores clickhouse integration"
optional = true
python-versions = ">=3.8.1,<4.0"
files = [
{file = "llama_index_vector_stores_clickhouse-0.1.3-py3-none-any.whl", hash = "sha256:fb832aed830e8190db5f29607a84bdf8e99c01f08226b4a672911ca9b11b4546"},
{file = "llama_index_vector_stores_clickhouse-0.1.3.tar.gz", hash = "sha256:787ca0b9391abe0f514ae25d2c42e890f1ecbb9ae254337329232546e3355ee1"},
]
[package.dependencies]
clickhouse-connect = ">=0.7.0,<0.8.0"
llama-index-core = ">=0.10.5,<0.11.0"
[[package]]
name = "llama-index-vector-stores-milvus"
version = "0.1.20"
description = "llama-index vector_stores milvus integration"
optional = true
python-versions = "<4.0,>=3.8.1"
files = [
{file = "llama_index_vector_stores_milvus-0.1.20-py3-none-any.whl", hash = "sha256:27a61fd237e67b648f36964c2e25275df4cb20dd740d111f0b75db477259ef5b"},
{file = "llama_index_vector_stores_milvus-0.1.20.tar.gz", hash = "sha256:461bccce036be7bb739e57eb3855f64557c506023febfc08f98899778d460602"},
]
[package.dependencies]
llama-index-core = ">=0.10.1,<0.11.0"
pymilvus = ">=2.3.6,<3.0.0"
[[package]]
name = "llama-index-vector-stores-postgres"
version = "0.1.11"
@@ -2506,6 +2811,56 @@ grpcio = ">=1.60.0,<2.0.0"
llama-index-core = ">=0.10.1,<0.11.0"
qdrant-client = ">=1.7.1,<2.0.0"
[[package]]
name = "lz4"
version = "4.3.3"
description = "LZ4 Bindings for Python"
optional = true
python-versions = ">=3.8"
files = [
{file = "lz4-4.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b891880c187e96339474af2a3b2bfb11a8e4732ff5034be919aa9029484cd201"},
{file = "lz4-4.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:222a7e35137d7539c9c33bb53fcbb26510c5748779364014235afc62b0ec797f"},
{file = "lz4-4.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f76176492ff082657ada0d0f10c794b6da5800249ef1692b35cf49b1e93e8ef7"},
{file = "lz4-4.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1d18718f9d78182c6b60f568c9a9cec8a7204d7cb6fad4e511a2ef279e4cb05"},
{file = "lz4-4.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6cdc60e21ec70266947a48839b437d46025076eb4b12c76bd47f8e5eb8a75dcc"},
{file = "lz4-4.3.3-cp310-cp310-win32.whl", hash = "sha256:c81703b12475da73a5d66618856d04b1307e43428a7e59d98cfe5a5d608a74c6"},
{file = "lz4-4.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:43cf03059c0f941b772c8aeb42a0813d68d7081c009542301637e5782f8a33e2"},
{file = "lz4-4.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:30e8c20b8857adef7be045c65f47ab1e2c4fabba86a9fa9a997d7674a31ea6b6"},
{file = "lz4-4.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2f7b1839f795315e480fb87d9bc60b186a98e3e5d17203c6e757611ef7dcef61"},
{file = "lz4-4.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edfd858985c23523f4e5a7526ca6ee65ff930207a7ec8a8f57a01eae506aaee7"},
{file = "lz4-4.3.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e9c410b11a31dbdc94c05ac3c480cb4b222460faf9231f12538d0074e56c563"},
{file = "lz4-4.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2507ee9c99dbddd191c86f0e0c8b724c76d26b0602db9ea23232304382e1f21"},
{file = "lz4-4.3.3-cp311-cp311-win32.whl", hash = "sha256:f180904f33bdd1e92967923a43c22899e303906d19b2cf8bb547db6653ea6e7d"},
{file = "lz4-4.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:b14d948e6dce389f9a7afc666d60dd1e35fa2138a8ec5306d30cd2e30d36b40c"},
{file = "lz4-4.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e36cd7b9d4d920d3bfc2369840da506fa68258f7bb176b8743189793c055e43d"},
{file = "lz4-4.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:31ea4be9d0059c00b2572d700bf2c1bc82f241f2c3282034a759c9a4d6ca4dc2"},
{file = "lz4-4.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33c9a6fd20767ccaf70649982f8f3eeb0884035c150c0b818ea660152cf3c809"},
{file = "lz4-4.3.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca8fccc15e3add173da91be8f34121578dc777711ffd98d399be35487c934bf"},
{file = "lz4-4.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d84b479ddf39fe3ea05387f10b779155fc0990125f4fb35d636114e1c63a2e"},
{file = "lz4-4.3.3-cp312-cp312-win32.whl", hash = "sha256:337cb94488a1b060ef1685187d6ad4ba8bc61d26d631d7ba909ee984ea736be1"},
{file = "lz4-4.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:5d35533bf2cee56f38ced91f766cd0038b6abf46f438a80d50c52750088be93f"},
{file = "lz4-4.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:363ab65bf31338eb364062a15f302fc0fab0a49426051429866d71c793c23394"},
{file = "lz4-4.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a136e44a16fc98b1abc404fbabf7f1fada2bdab6a7e970974fb81cf55b636d0"},
{file = "lz4-4.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abc197e4aca8b63f5ae200af03eb95fb4b5055a8f990079b5bdf042f568469dd"},
{file = "lz4-4.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56f4fe9c6327adb97406f27a66420b22ce02d71a5c365c48d6b656b4aaeb7775"},
{file = "lz4-4.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0e822cd7644995d9ba248cb4b67859701748a93e2ab7fc9bc18c599a52e4604"},
{file = "lz4-4.3.3-cp38-cp38-win32.whl", hash = "sha256:24b3206de56b7a537eda3a8123c644a2b7bf111f0af53bc14bed90ce5562d1aa"},
{file = "lz4-4.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:b47839b53956e2737229d70714f1d75f33e8ac26e52c267f0197b3189ca6de24"},
{file = "lz4-4.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6756212507405f270b66b3ff7f564618de0606395c0fe10a7ae2ffcbbe0b1fba"},
{file = "lz4-4.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee9ff50557a942d187ec85462bb0960207e7ec5b19b3b48949263993771c6205"},
{file = "lz4-4.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b901c7784caac9a1ded4555258207d9e9697e746cc8532129f150ffe1f6ba0d"},
{file = "lz4-4.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6d9ec061b9eca86e4dcc003d93334b95d53909afd5a32c6e4f222157b50c071"},
{file = "lz4-4.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4c7bf687303ca47d69f9f0133274958fd672efaa33fb5bcde467862d6c621f0"},
{file = "lz4-4.3.3-cp39-cp39-win32.whl", hash = "sha256:054b4631a355606e99a42396f5db4d22046a3397ffc3269a348ec41eaebd69d2"},
{file = "lz4-4.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:eac9af361e0d98335a02ff12fb56caeb7ea1196cf1a49dbf6f17828a131da807"},
{file = "lz4-4.3.3.tar.gz", hash = "sha256:01fe674ef2889dbb9899d8a67361e0c4a2c833af5aeb37dd505727cf5d2a131e"},
]
[package.extras]
docs = ["sphinx (>=1.6.0)", "sphinx-bootstrap-theme"]
flake8 = ["flake8"]
tests = ["psutil", "pytest (!=3.3.0)", "pytest-cov"]
[[package]]
name = "markdown-it-py"
version = "3.0.0"
@@ -2678,6 +3033,22 @@ files = [
{file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
] ]
[[package]]
name = "milvus-lite"
version = "2.4.8"
description = "A lightweight version of Milvus wrapped with Python."
optional = true
python-versions = ">=3.7"
files = [
{file = "milvus_lite-2.4.8-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:b7e90b34b214884cd44cdc112ab243d4cb197b775498355e2437b6cafea025fe"},
{file = "milvus_lite-2.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:519dfc62709d8f642d98a1c5b1dcde7080d107e6e312d677fef5a3412a40ac08"},
{file = "milvus_lite-2.4.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:b21f36d24cbb0e920b4faad607019bb28c1b2c88b4d04680ac8c7697a4ae8a4d"},
{file = "milvus_lite-2.4.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:08332a2b9abfe7c4e1d7926068937e46f8fb81f2707928b7bc02c9dc99cebe41"},
]
[package.dependencies]
tqdm = "*"
[[package]]
name = "minijinja"
version = "2.0.1"
@@ -3263,6 +3634,20 @@ rsa = ["cryptography (>=3.0.0)"]
signals = ["blinker (>=1.4.0)"]
signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "ollama"
version = "0.3.0"
description = "The official Python client for Ollama."
optional = true
python-versions = "<4.0,>=3.8"
files = [
{file = "ollama-0.3.0-py3-none-any.whl", hash = "sha256:cd7010c4e2a37d7f08f36cd35c4592b14f1ec0d1bf3df10342cd47963d81ad7a"},
{file = "ollama-0.3.0.tar.gz", hash = "sha256:6ff493a2945ba76cdd6b7912a1cd79a45cfd9ba9120d14adeb63b2b5a7f353da"},
]
[package.dependencies]
httpx = ">=0.27.0,<0.28.0"
[[package]]
name = "onnxruntime"
version = "1.17.1"
@@ -3665,70 +4050,100 @@ numpy = "*"
[[package]]
name = "pillow"
version = "10.4.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.8"
files = [
{file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"},
{file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"},
{file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"},
{file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"},
{file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"},
{file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"},
{file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"},
{file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"},
{file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"},
{file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"},
{file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"},
{file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"},
{file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"},
{file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"},
{file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"},
{file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"},
{file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"},
{file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"},
{file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"},
{file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"},
{file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"},
{file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"},
{file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"},
{file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"},
{file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"},
{file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"},
{file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"},
{file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"},
{file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"},
{file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"},
{file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"},
{file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"},
{file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"},
{file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"},
{file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"},
{file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"},
{file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"},
{file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"},
{file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"},
{file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"},
{file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"},
{file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"},
{file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"},
{file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"},
{file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"},
{file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"},
{file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"},
{file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"},
{file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"},
{file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"},
{file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"},
{file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"},
{file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"},
{file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"},
{file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"},
{file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"},
{file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"},
{file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"},
{file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"},
{file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"},
{file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"},
{file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"},
{file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"},
{file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"},
{file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"},
{file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"},
{file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"},
{file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"},
{file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"},
{file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"},
{file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"},
{file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"},
{file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"},
{file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"},
{file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"},
{file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"},
{file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"},
{file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"},
{file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"},
{file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"},
]
[package.extras]
docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
fpx = ["olefile"]
mic = ["olefile"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
typing = ["typing-extensions"]
xmp = ["defusedxml"]
[[package]]
name = "platformdirs"
@@ -3820,6 +4235,23 @@ nodeenv = ">=0.11.1"
pyyaml = ">=5.1"
virtualenv = ">=20.10.0"
[[package]]
name = "proto-plus"
version = "1.24.0"
description = "Beautiful, Pythonic protocol buffers."
optional = true
python-versions = ">=3.7"
files = [
{file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"},
{file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"},
]
[package.dependencies]
protobuf = ">=3.19.0,<6.0.0dev"
[package.extras]
testing = ["google-api-core (>=1.31.5)"]
[[package]]
name = "protobuf"
version = "4.25.1"
@@ -4218,6 +4650,31 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte
docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
[[package]]
name = "pymilvus"
version = "2.4.4"
description = "Python Sdk for Milvus"
optional = true
python-versions = ">=3.8"
files = [
{file = "pymilvus-2.4.4-py3-none-any.whl", hash = "sha256:073b76bc36f6f4e70f0f0a0023a53324f0ba8ef9a60883f87cd30a44b6c6f2b5"},
{file = "pymilvus-2.4.4.tar.gz", hash = "sha256:50c53eb103e034fbffe936fe942751ea3dbd2452e18cf79acc52360ed4987fb7"},
]
[package.dependencies]
environs = "<=9.5.0"
grpcio = ">=1.49.1,<=1.63.0"
milvus-lite = {version = ">=2.4.0,<2.5.0", markers = "sys_platform != \"win32\""}
pandas = ">=1.2.4"
protobuf = ">=3.20.0"
setuptools = ">=67"
ujson = ">=2.0.0"
[package.extras]
bulk-writer = ["azure-storage-blob", "minio (>=7.0.0)", "pyarrow (>=12.0.0)", "requests"]
dev = ["black", "grpcio (==1.62.2)", "grpcio-testing (==1.62.2)", "grpcio-tools (==1.62.2)", "pytest (>=5.3.4)", "pytest-cov (>=2.8.1)", "pytest-timeout (>=1.3.4)", "ruff (>0.4.0)"]
model = ["milvus-model (>=0.1.0)"]
[[package]]
name = "pyparsing"
version = "3.1.1"
@@ -5797,6 +6254,17 @@ files = [
{file = "ujson-5.9.0.tar.gz", hash = "sha256:89cc92e73d5501b8a7f48575eeb14ad27156ad092c2e9fc7e3cf949f07e75532"}, {file = "ujson-5.9.0.tar.gz", hash = "sha256:89cc92e73d5501b8a7f48575eeb14ad27156ad092c2e9fc7e3cf949f07e75532"},
] ]
[[package]]
name = "uritemplate"
version = "4.1.1"
description = "Implementation of RFC 6570 URI Templates"
optional = true
python-versions = ">=3.6"
files = [
{file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"},
{file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"},
]
[[package]]
name = "urllib3"
version = "2.2.2"
@@ -6326,26 +6794,91 @@ files = [
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
[[package]]
name = "zstandard"
version = "0.22.0"
description = "Zstandard bindings for Python"
optional = true
python-versions = ">=3.8"
files = [
{file = "zstandard-0.22.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:275df437ab03f8c033b8a2c181e51716c32d831082d93ce48002a5227ec93019"},
{file = "zstandard-0.22.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ac9957bc6d2403c4772c890916bf181b2653640da98f32e04b96e4d6fb3252a"},
{file = "zstandard-0.22.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe3390c538f12437b859d815040763abc728955a52ca6ff9c5d4ac707c4ad98e"},
{file = "zstandard-0.22.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1958100b8a1cc3f27fa21071a55cb2ed32e9e5df4c3c6e661c193437f171cba2"},
{file = "zstandard-0.22.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e1856c8313bc688d5df069e106a4bc962eef3d13372020cc6e3ebf5e045202"},
{file = "zstandard-0.22.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1a90ba9a4c9c884bb876a14be2b1d216609385efb180393df40e5172e7ecf356"},
{file = "zstandard-0.22.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3db41c5e49ef73641d5111554e1d1d3af106410a6c1fb52cf68912ba7a343a0d"},
{file = "zstandard-0.22.0-cp310-cp310-win32.whl", hash = "sha256:d8593f8464fb64d58e8cb0b905b272d40184eac9a18d83cf8c10749c3eafcd7e"},
{file = "zstandard-0.22.0-cp310-cp310-win_amd64.whl", hash = "sha256:f1a4b358947a65b94e2501ce3e078bbc929b039ede4679ddb0460829b12f7375"},
{file = "zstandard-0.22.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:589402548251056878d2e7c8859286eb91bd841af117dbe4ab000e6450987e08"},
{file = "zstandard-0.22.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a97079b955b00b732c6f280d5023e0eefe359045e8b83b08cf0333af9ec78f26"},
{file = "zstandard-0.22.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:445b47bc32de69d990ad0f34da0e20f535914623d1e506e74d6bc5c9dc40bb09"},
{file = "zstandard-0.22.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33591d59f4956c9812f8063eff2e2c0065bc02050837f152574069f5f9f17775"},
{file = "zstandard-0.22.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:888196c9c8893a1e8ff5e89b8f894e7f4f0e64a5af4d8f3c410f0319128bb2f8"},
{file = "zstandard-0.22.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:53866a9d8ab363271c9e80c7c2e9441814961d47f88c9bc3b248142c32141d94"},
{file = "zstandard-0.22.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4ac59d5d6910b220141c1737b79d4a5aa9e57466e7469a012ed42ce2d3995e88"},
{file = "zstandard-0.22.0-cp311-cp311-win32.whl", hash = "sha256:2b11ea433db22e720758cba584c9d661077121fcf60ab43351950ded20283440"},
{file = "zstandard-0.22.0-cp311-cp311-win_amd64.whl", hash = "sha256:11f0d1aab9516a497137b41e3d3ed4bbf7b2ee2abc79e5c8b010ad286d7464bd"},
{file = "zstandard-0.22.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6c25b8eb733d4e741246151d895dd0308137532737f337411160ff69ca24f93a"},
{file = "zstandard-0.22.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f9b2cde1cd1b2a10246dbc143ba49d942d14fb3d2b4bccf4618d475c65464912"},
{file = "zstandard-0.22.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a88b7df61a292603e7cd662d92565d915796b094ffb3d206579aaebac6b85d5f"},
{file = "zstandard-0.22.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:466e6ad8caefb589ed281c076deb6f0cd330e8bc13c5035854ffb9c2014b118c"},
{file = "zstandard-0.22.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1d67d0d53d2a138f9e29d8acdabe11310c185e36f0a848efa104d4e40b808e4"},
{file = "zstandard-0.22.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:39b2853efc9403927f9065cc48c9980649462acbdf81cd4f0cb773af2fd734bc"},
{file = "zstandard-0.22.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8a1b2effa96a5f019e72874969394edd393e2fbd6414a8208fea363a22803b45"},
{file = "zstandard-0.22.0-cp312-cp312-win32.whl", hash = "sha256:88c5b4b47a8a138338a07fc94e2ba3b1535f69247670abfe422de4e0b344aae2"},
{file = "zstandard-0.22.0-cp312-cp312-win_amd64.whl", hash = "sha256:de20a212ef3d00d609d0b22eb7cc798d5a69035e81839f549b538eff4105d01c"},
{file = "zstandard-0.22.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d75f693bb4e92c335e0645e8845e553cd09dc91616412d1d4650da835b5449df"},
{file = "zstandard-0.22.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:36a47636c3de227cd765e25a21dc5dace00539b82ddd99ee36abae38178eff9e"},
{file = "zstandard-0.22.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68953dc84b244b053c0d5f137a21ae8287ecf51b20872eccf8eaac0302d3e3b0"},
{file = "zstandard-0.22.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2612e9bb4977381184bb2463150336d0f7e014d6bb5d4a370f9a372d21916f69"},
{file = "zstandard-0.22.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23d2b3c2b8e7e5a6cb7922f7c27d73a9a615f0a5ab5d0e03dd533c477de23004"},
{file = "zstandard-0.22.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d43501f5f31e22baf822720d82b5547f8a08f5386a883b32584a185675c8fbf"},
{file = "zstandard-0.22.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a493d470183ee620a3df1e6e55b3e4de8143c0ba1b16f3ded83208ea8ddfd91d"},
{file = "zstandard-0.22.0-cp38-cp38-win32.whl", hash = "sha256:7034d381789f45576ec3f1fa0e15d741828146439228dc3f7c59856c5bcd3292"},
{file = "zstandard-0.22.0-cp38-cp38-win_amd64.whl", hash = "sha256:d8fff0f0c1d8bc5d866762ae95bd99d53282337af1be9dc0d88506b340e74b73"},
{file = "zstandard-0.22.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2fdd53b806786bd6112d97c1f1e7841e5e4daa06810ab4b284026a1a0e484c0b"},
{file = "zstandard-0.22.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:73a1d6bd01961e9fd447162e137ed949c01bdb830dfca487c4a14e9742dccc93"},
{file = "zstandard-0.22.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9501f36fac6b875c124243a379267d879262480bf85b1dbda61f5ad4d01b75a3"},
{file = "zstandard-0.22.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48f260e4c7294ef275744210a4010f116048e0c95857befb7462e033f09442fe"},
{file = "zstandard-0.22.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959665072bd60f45c5b6b5d711f15bdefc9849dd5da9fb6c873e35f5d34d8cfb"},
{file = "zstandard-0.22.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d22fdef58976457c65e2796e6730a3ea4a254f3ba83777ecfc8592ff8d77d303"},
{file = "zstandard-0.22.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a7ccf5825fd71d4542c8ab28d4d482aace885f5ebe4b40faaa290eed8e095a4c"},
{file = "zstandard-0.22.0-cp39-cp39-win32.whl", hash = "sha256:f058a77ef0ece4e210bb0450e68408d4223f728b109764676e1a13537d056bb0"},
{file = "zstandard-0.22.0-cp39-cp39-win_amd64.whl", hash = "sha256:e9e9d4e2e336c529d4c435baad846a181e39a982f823f7e4495ec0b0ec8538d2"},
{file = "zstandard-0.22.0.tar.gz", hash = "sha256:8226a33c542bcb54cd6bd0a366067b610b41713b64c9abec1bc4533d69f51e70"},
]

[package.dependencies]
cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\""}

[package.extras]
cffi = ["cffi (>=1.11)"]

 [extras]
 embeddings-azopenai = ["llama-index-embeddings-azure-openai"]
+embeddings-gemini = ["llama-index-embeddings-gemini"]
 embeddings-huggingface = ["llama-index-embeddings-huggingface"]
-embeddings-ollama = ["llama-index-embeddings-ollama"]
+embeddings-ollama = ["llama-index-embeddings-ollama", "ollama"]
 embeddings-openai = ["llama-index-embeddings-openai"]
 embeddings-sagemaker = ["boto3"]
 llms-azopenai = ["llama-index-llms-azure-openai"]
+llms-gemini = ["google-generativeai", "llama-index-llms-gemini"]
 llms-llama-cpp = ["llama-index-llms-llama-cpp"]
-llms-ollama = ["llama-index-llms-ollama"]
+llms-ollama = ["llama-index-llms-ollama", "ollama"]
 llms-openai = ["llama-index-llms-openai"]
 llms-openai-like = ["llama-index-llms-openai-like"]
 llms-sagemaker = ["boto3"]
 rerank-sentence-transformers = ["sentence-transformers", "torch"]
 storage-nodestore-postgres = ["asyncpg", "llama-index-storage-docstore-postgres", "llama-index-storage-index-store-postgres", "psycopg2-binary"]
-ui = ["gradio"]
+ui = ["ffmpy", "gradio"]
 vector-stores-chroma = ["llama-index-vector-stores-chroma"]
+vector-stores-clickhouse = ["clickhouse-connect", "llama-index-vector-stores-clickhouse"]
+vector-stores-milvus = ["llama-index-vector-stores-milvus"]
 vector-stores-postgres = ["llama-index-vector-stores-postgres"]
 vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]

 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.11,<3.12"
-content-hash = "955caf907acef68af94fd63f287e2ba257f8721dcc8d7f371ab10f54d4980bd3"
+content-hash = "dce5b88d92bcfa047bf1e4c9fe2dbb9c63eb864d6bbca2340801ac0a2f02a8d4"
View file
@@ -71,16 +71,46 @@ class EmbeddingComponent:
                     from llama_index.embeddings.ollama import (  # type: ignore
                         OllamaEmbedding,
                     )
+                    from ollama import Client  # type: ignore
                 except ImportError as e:
                     raise ImportError(
                         "Local dependencies not found, install with `poetry install --extras embeddings-ollama`"
                     ) from e

                 ollama_settings = settings.ollama

+                # Calculate the embedding model name; if no tag is provided, use ":latest".
+                model_name = (
+                    ollama_settings.embedding_model + ":latest"
+                    if ":" not in ollama_settings.embedding_model
+                    else ollama_settings.embedding_model
+                )
+
                 self.embedding_model = OllamaEmbedding(
-                    model_name=ollama_settings.embedding_model,
+                    model_name=model_name,
                     base_url=ollama_settings.embedding_api_base,
                 )
+
+                if ollama_settings.autopull_models:
+                    from private_gpt.utils.ollama import (
+                        check_connection,
+                        pull_model,
+                    )
+
+                    # TODO: Reuse llama-index client when llama-index is updated
+                    client = Client(
+                        host=ollama_settings.embedding_api_base,
+                        timeout=ollama_settings.request_timeout,
+                    )
+
+                    if not check_connection(client):
+                        raise ValueError(
+                            f"Failed to connect to Ollama, "
+                            f"check if Ollama server is running on {ollama_settings.api_base}"
+                        )
+                    pull_model(client, model_name)
+
             case "azopenai":
                 try:
                     from llama_index.embeddings.azure_openai import (  # type: ignore

@@ -99,6 +129,20 @@ class EmbeddingComponent:
                     azure_endpoint=azopenai_settings.azure_endpoint,
                     api_version=azopenai_settings.api_version,
                 )
+            case "gemini":
+                try:
+                    from llama_index.embeddings.gemini import (  # type: ignore
+                        GeminiEmbedding,
+                    )
+                except ImportError as e:
+                    raise ImportError(
+                        "Gemini dependencies not found, install with `poetry install --extras embeddings-gemini`"
+                    ) from e
+
+                self.embedding_model = GeminiEmbedding(
+                    api_key=settings.gemini.api_key,
+                    model_name=settings.gemini.embedding_model,
+                )
             case "mock":
                 # Not a random number, is the dimensionality used by
                 # the default embedding model
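
Note: the ":latest" normalization added above reduces to a one-line rule. A minimal standalone sketch (the helper name and sample model tags are illustrative, not part of this commit):

def normalize_model_tag(model: str) -> str:
    # Mirrors the logic above: append ":latest" only when no tag is present.
    return model if ":" in model else model + ":latest"

assert normalize_model_tag("nomic-embed-text") == "nomic-embed-text:latest"
assert normalize_model_tag("nomic-embed-text:v1.5") == "nomic-embed-text:v1.5"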
View file
@@ -35,10 +35,10 @@ class LLMComponent:
                 )
             except Exception as e:
                 logger.warning(
-                    "Failed to download tokenizer %s. Falling back to "
-                    "default tokenizer.",
-                    settings.llm.tokenizer,
-                    e,
+                    f"Failed to download tokenizer {settings.llm.tokenizer}: {e!s}"
+                    f"Please follow the instructions in the documentation to download it if needed: "
+                    f"https://docs.privategpt.dev/installation/getting-started/troubleshooting#tokenizer-setup."
+                    f"Falling back to default tokenizer."
                 )

         logger.info("Initializing the LLM in mode=%s", llm_mode)

@@ -146,8 +146,15 @@ class LLMComponent:
                     "repeat_penalty": ollama_settings.repeat_penalty,  # ollama llama-cpp
                 }

-                self.llm = Ollama(
-                    model=ollama_settings.llm_model,
+                # Calculate the LLM model name; if no tag is provided, use ":latest".
+                model_name = (
+                    ollama_settings.llm_model + ":latest"
+                    if ":" not in ollama_settings.llm_model
+                    else ollama_settings.llm_model
+                )
+
+                llm = Ollama(
+                    model=model_name,
                     base_url=ollama_settings.api_base,
                     temperature=settings.llm.temperature,
                     context_window=settings.llm.context_window,

@@ -155,6 +162,16 @@ class LLMComponent:
                     request_timeout=ollama_settings.request_timeout,
                 )

+                if ollama_settings.autopull_models:
+                    from private_gpt.utils.ollama import check_connection, pull_model
+
+                    if not check_connection(llm.client):
+                        raise ValueError(
+                            f"Failed to connect to Ollama, "
+                            f"check if Ollama server is running on {ollama_settings.api_base}"
+                        )
+                    pull_model(llm.client, model_name)
+
                 if (
                     ollama_settings.keep_alive
                     != ollama_settings.model_fields["keep_alive"].default

@@ -172,6 +189,8 @@ class LLMComponent:
                     Ollama.complete = add_keep_alive(Ollama.complete)
                     Ollama.stream_complete = add_keep_alive(Ollama.stream_complete)

+                self.llm = llm
+
             case "azopenai":
                 try:
                     from llama_index.llms.azure_openai import (  # type: ignore

@@ -190,5 +209,18 @@ class LLMComponent:
                     azure_endpoint=azopenai_settings.azure_endpoint,
                     api_version=azopenai_settings.api_version,
                 )
+            case "gemini":
+                try:
+                    from llama_index.llms.gemini import (  # type: ignore
+                        Gemini,
+                    )
+                except ImportError as e:
+                    raise ImportError(
+                        "Google Gemini dependencies not found, install with `poetry install --extras llms-gemini`"
+                    ) from e
+                gemini_settings = settings.gemini
+                self.llm = Gemini(
+                    model_name=gemini_settings.model, api_key=gemini_settings.api_key
+                )
             case "mock":
                 self.llm = MockLLM()
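
Note: `add_keep_alive` is defined earlier in this file and only wraps the Ollama entry points when `keep_alive` differs from its default. A rough sketch of that kwargs-injecting wrapper pattern, under assumed names and signature (the real implementation may differ):

from collections.abc import Callable
from functools import wraps

def add_keep_alive(func: Callable) -> Callable:
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Inject keep_alive into every call unless the caller already set it.
        kwargs.setdefault("keep_alive", "5m")
        return func(*args, **kwargs)
    return wrapper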
View file
@@ -121,6 +121,72 @@ class VectorStoreComponent:
                         collection_name="make_this_parameterizable_per_api_call",
                     ),  # TODO
                 )

+            case "milvus":
+                try:
+                    from llama_index.vector_stores.milvus import (  # type: ignore
+                        MilvusVectorStore,
+                    )
+                except ImportError as e:
+                    raise ImportError(
+                        "Milvus dependencies not found, install with `poetry install --extras vector-stores-milvus`"
+                    ) from e
+
+                if settings.milvus is None:
+                    logger.info(
+                        "Milvus config not found. Using default settings.\n"
+                        "Trying to connect to Milvus at local_data/private_gpt/milvus/milvus_local.db "
+                        "with collection 'make_this_parameterizable_per_api_call'."
+                    )
+
+                    self.vector_store = typing.cast(
+                        BasePydanticVectorStore,
+                        MilvusVectorStore(
+                            dim=settings.embedding.embed_dim,
+                            collection_name="make_this_parameterizable_per_api_call",
+                            overwrite=True,
+                        ),
+                    )
+                else:
+                    self.vector_store = typing.cast(
+                        BasePydanticVectorStore,
+                        MilvusVectorStore(
+                            dim=settings.embedding.embed_dim,
+                            uri=settings.milvus.uri,
+                            token=settings.milvus.token,
+                            collection_name=settings.milvus.collection_name,
+                            overwrite=settings.milvus.overwrite,
+                        ),
+                    )
+
+            case "clickhouse":
+                try:
+                    from clickhouse_connect import (  # type: ignore
+                        get_client,
+                    )
+                    from llama_index.vector_stores.clickhouse import (  # type: ignore
+                        ClickHouseVectorStore,
+                    )
+                except ImportError as e:
+                    raise ImportError(
+                        "ClickHouse dependencies not found, install with `poetry install --extras vector-stores-clickhouse`"
+                    ) from e
+
+                if settings.clickhouse is None:
+                    raise ValueError(
+                        "ClickHouse settings not found. Please provide settings."
+                    )
+
+                clickhouse_client = get_client(
+                    host=settings.clickhouse.host,
+                    port=settings.clickhouse.port,
+                    username=settings.clickhouse.username,
+                    password=settings.clickhouse.password,
+                )
+                self.vector_store = ClickHouseVectorStore(
+                    clickhouse_client=clickhouse_client
+                )
             case _:
                 # Should be unreachable
                 # The settings validator should have caught this
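
Note: `get_client` here is clickhouse-connect's standard entry point. A minimal connectivity check outside PrivateGPT, reusing the sample credentials from the settings.yaml change later in this commit (the values are assumptions for a local test server):

import clickhouse_connect

client = clickhouse_connect.get_client(
    host="localhost", port=8443, username="admin", password="clickhouse"
)
print(client.command("SELECT version()"))  # simple round-trip sanity check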
View file
@@ -1,4 +1,4 @@
-from typing import Literal
+from typing import Any, Literal

 from pydantic import BaseModel, Field

@@ -82,7 +82,14 @@ class DataSettings(BaseModel):
 class LLMSettings(BaseModel):
     mode: Literal[
-        "llamacpp", "openai", "openailike", "azopenai", "sagemaker", "mock", "ollama"
+        "llamacpp",
+        "openai",
+        "openailike",
+        "azopenai",
+        "sagemaker",
+        "mock",
+        "ollama",
+        "gemini",
     ]
     max_new_tokens: int = Field(
         256,

@@ -121,7 +128,7 @@ class LLMSettings(BaseModel):
 class VectorstoreSettings(BaseModel):
-    database: Literal["chroma", "qdrant", "postgres"]
+    database: Literal["chroma", "qdrant", "postgres", "clickhouse", "milvus"]

 class NodeStoreSettings(BaseModel):

@@ -160,7 +167,9 @@ class HuggingFaceSettings(BaseModel):
 class EmbeddingSettings(BaseModel):
-    mode: Literal["huggingface", "openai", "azopenai", "sagemaker", "ollama", "mock"]
+    mode: Literal[
+        "huggingface", "openai", "azopenai", "sagemaker", "ollama", "mock", "gemini"
+    ]
     ingest_mode: Literal["simple", "batch", "parallel", "pipeline"] = Field(
         "simple",
         description=(

@@ -223,6 +232,18 @@ class OpenAISettings(BaseModel):
     )

+class GeminiSettings(BaseModel):
+    api_key: str
+    model: str = Field(
+        "models/gemini-pro",
+        description="Google Model to use. Example: 'models/gemini-pro'.",
+    )
+    embedding_model: str = Field(
+        "models/embedding-001",
+        description="Google Embedding Model to use. Example: 'models/embedding-001'.",
+    )
+
 class OllamaSettings(BaseModel):
     api_base: str = Field(
         "http://localhost:11434",

@@ -272,6 +293,10 @@ class OllamaSettings(BaseModel):
         120.0,
         description="Time elapsed until ollama times out the request. Default is 120s. Format is float.",
     )
+    autopull_models: bool = Field(
+        False,
+        description="If set to True, Ollama will automatically pull models from the API base.",
+    )

 class AzureOpenAISettings(BaseModel):

@@ -338,6 +363,77 @@ class RagSettings(BaseModel):
     rerank: RerankSettings

+class ClickHouseSettings(BaseModel):
+    host: str = Field(
+        "localhost",
+        description="The server hosting the ClickHouse database",
+    )
+    port: int = Field(
+        8443,
+        description="The port on which the ClickHouse database is accessible",
+    )
+    username: str = Field(
+        "default",
+        description="The username to use to connect to the ClickHouse database",
+    )
+    password: str = Field(
+        "",
+        description="The password to use to connect to the ClickHouse database",
+    )
+    database: str = Field(
+        "__default__",
+        description="The default database to use for connections",
+    )
+    secure: bool | str = Field(
+        False,
+        description="Use https/TLS for secure connection to the server",
+    )
+    interface: str | None = Field(
+        None,
+        description="Must be either 'http' or 'https'. Determines the protocol to use for the connection",
+    )
+    settings: dict[str, Any] | None = Field(
+        None,
+        description="Specific ClickHouse server settings to be used with the session",
+    )
+    connect_timeout: int | None = Field(
+        None,
+        description="Timeout in seconds for establishing a connection",
+    )
+    send_receive_timeout: int | None = Field(
+        None,
+        description="Read timeout in seconds for http connection",
+    )
+    verify: bool | None = Field(
+        None,
+        description="Verify the server certificate in secure/https mode",
+    )
+    ca_cert: str | None = Field(
+        None,
+        description="Path to Certificate Authority root certificate (.pem format)",
+    )
+    client_cert: str | None = Field(
+        None,
+        description="Path to TLS Client certificate (.pem format)",
+    )
+    client_cert_key: str | None = Field(
+        None,
+        description="Path to the private key for the TLS Client certificate",
+    )
+    http_proxy: str | None = Field(
+        None,
+        description="HTTP proxy address",
+    )
+    https_proxy: str | None = Field(
+        None,
+        description="HTTPS proxy address",
+    )
+    server_host_name: str | None = Field(
+        None,
+        description="Server host name to be checked against the TLS certificate",
+    )
+
 class PostgresSettings(BaseModel):
     host: str = Field(
         "localhost",

@@ -419,6 +515,27 @@ class QdrantSettings(BaseModel):
     )

+class MilvusSettings(BaseModel):
+    uri: str = Field(
+        "local_data/private_gpt/milvus/milvus_local.db",
+        description="The URI of the Milvus instance. For example: 'local_data/private_gpt/milvus/milvus_local.db' for Milvus Lite.",
+    )
+    token: str = Field(
+        "",
+        description=(
+            "A valid access token to access the specified Milvus instance. "
+            "This can be used as a recommended alternative to setting user and password separately."
+        ),
+    )
+    collection_name: str = Field(
+        "make_this_parameterizable_per_api_call",
+        description="The name of the collection in Milvus. Default is 'make_this_parameterizable_per_api_call'.",
+    )
+    overwrite: bool = Field(
+        True, description="Overwrite the previous collection schema if it exists."
+    )
+
 class Settings(BaseModel):
     server: ServerSettings
     data: DataSettings

@@ -429,6 +546,7 @@ class Settings(BaseModel):
     huggingface: HuggingFaceSettings
     sagemaker: SagemakerSettings
     openai: OpenAISettings
+    gemini: GeminiSettings
     ollama: OllamaSettings
     azopenai: AzureOpenAISettings
     vectorstore: VectorstoreSettings

@@ -436,6 +554,8 @@ class Settings(BaseModel):
     rag: RagSettings
     qdrant: QdrantSettings | None = None
     postgres: PostgresSettings | None = None
+    clickhouse: ClickHouseSettings | None = None
+    milvus: MilvusSettings | None = None

 """
View file
@@ -444,6 +444,7 @@ class PrivateGptUi:
             "sagemaker": config_settings.sagemaker.llm_endpoint_name,
             "mock": llm_mode,
             "ollama": config_settings.ollama.llm_model,
+            "gemini": config_settings.gemini.model,
         }

         if llm_mode not in model_mapping:
View file
@@ -0,0 +1,32 @@
import logging

try:
    from ollama import Client  # type: ignore
except ImportError as e:
    raise ImportError(
        "Ollama dependencies not found, install with `poetry install --extras llms-ollama or embeddings-ollama`"
    ) from e

logger = logging.getLogger(__name__)


def check_connection(client: Client) -> bool:
    try:
        client.list()
        return True
    except Exception as e:
        logger.error(f"Failed to connect to Ollama: {e!s}")
        return False


def pull_model(client: Client, model_name: str, raise_error: bool = True) -> None:
    try:
        installed_models = [model["name"] for model in client.list().get("models", {})]
        if model_name not in installed_models:
            logger.info(f"Pulling model {model_name}. Please wait...")
            client.pull(model_name)
            logger.info(f"Model {model_name} pulled successfully")
    except Exception as e:
        logger.error(f"Failed to pull model {model_name}: {e!s}")
        if raise_error:
            raise e
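
For reference, a minimal sketch of how these helpers are driven by the components above (host, timeout, and model tag are illustrative; both call signatures come from this file):

from ollama import Client

from private_gpt.utils.ollama import check_connection, pull_model

client = Client(host="http://localhost:11434", timeout=120.0)
if not check_connection(client):
    raise ValueError("Failed to connect to Ollama, check if the server is running")
pull_model(client, "llama3:latest")  # no-op if the model is already installed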
View file
@@ -22,21 +22,28 @@ llama-index-readers-file = "^0.1.27"
 llama-index-llms-llama-cpp = {version = "^0.1.4", optional = true}
 llama-index-llms-openai = {version = "^0.1.25", optional = true}
 llama-index-llms-openai-like = {version ="^0.1.3", optional = true}
-llama-index-llms-ollama = {version ="^0.1.5", optional = true}
+llama-index-llms-ollama = {version ="^0.2.2", optional = true}
 llama-index-llms-azure-openai = {version ="^0.1.8", optional = true}
+llama-index-llms-gemini = {version ="^0.1.11", optional = true}
 llama-index-embeddings-ollama = {version ="^0.1.2", optional = true}
 llama-index-embeddings-huggingface = {version ="^0.2.2", optional = true}
 llama-index-embeddings-openai = {version ="^0.1.10", optional = true}
 llama-index-embeddings-azure-openai = {version ="^0.1.10", optional = true}
+llama-index-embeddings-gemini = {version ="^0.1.8", optional = true}
 llama-index-vector-stores-qdrant = {version ="^0.2.10", optional = true}
+llama-index-vector-stores-milvus = {version ="^0.1.20", optional = true}
 llama-index-vector-stores-chroma = {version ="^0.1.10", optional = true}
 llama-index-vector-stores-postgres = {version ="^0.1.11", optional = true}
+llama-index-vector-stores-clickhouse = {version ="^0.1.3", optional = true}
 llama-index-storage-docstore-postgres = {version ="^0.1.3", optional = true}
 llama-index-storage-index-store-postgres = {version ="^0.1.4", optional = true}
 # Postgres
 psycopg2-binary = {version ="^2.9.9", optional = true}
 asyncpg = {version="^0.29.0", optional = true}
+# ClickHouse
+clickhouse-connect = {version = "^0.7.15", optional = true}

 # Optional Sagemaker dependency
 boto3 = {version ="^1.34.139", optional = true}

@@ -49,23 +56,35 @@ sentence-transformers = {version ="^3.0.1", optional = true}
 # Optional UI
 gradio = {version ="^4.37.2", optional = true}
+# Fix: https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/16289#issuecomment-2255106490
+ffmpy = {git = "https://github.com/EuDs63/ffmpy.git", rev = "333a19ee4d21f32537c0508aa1942ef1aa7afe24", optional = true}
+
+# Optional Google Gemini dependency
+google-generativeai = {version ="^0.5.4", optional = true}
+
+# Optional Ollama client
+ollama = {version ="^0.3.0", optional = true}

 [tool.poetry.extras]
-ui = ["gradio"]
+ui = ["gradio", "ffmpy"]
 llms-llama-cpp = ["llama-index-llms-llama-cpp"]
 llms-openai = ["llama-index-llms-openai"]
 llms-openai-like = ["llama-index-llms-openai-like"]
-llms-ollama = ["llama-index-llms-ollama"]
+llms-ollama = ["llama-index-llms-ollama", "ollama"]
 llms-sagemaker = ["boto3"]
 llms-azopenai = ["llama-index-llms-azure-openai"]
+llms-gemini = ["llama-index-llms-gemini", "google-generativeai"]
-embeddings-ollama = ["llama-index-embeddings-ollama"]
+embeddings-ollama = ["llama-index-embeddings-ollama", "ollama"]
 embeddings-huggingface = ["llama-index-embeddings-huggingface"]
 embeddings-openai = ["llama-index-embeddings-openai"]
 embeddings-sagemaker = ["boto3"]
 embeddings-azopenai = ["llama-index-embeddings-azure-openai"]
+embeddings-gemini = ["llama-index-embeddings-gemini"]
 vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]
+vector-stores-clickhouse = ["llama-index-vector-stores-clickhouse", "clickhouse_connect"]
 vector-stores-chroma = ["llama-index-vector-stores-chroma"]
 vector-stores-postgres = ["llama-index-vector-stores-postgres"]
+vector-stores-milvus = ["llama-index-vector-stores-milvus"]
 storage-nodestore-postgres = ["llama-index-storage-docstore-postgres","llama-index-storage-index-store-postgres","psycopg2-binary","asyncpg"]
 rerank-sentence-transformers = ["torch", "sentence-transformers"]
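
Note: each extra above only gates optional packages; the components import them lazily and raise the `ImportError` hints shown earlier when they are missing. A small hedged check (the helper name is made up) to verify an extra's modules are importable after e.g. `poetry install --extras llms-gemini`:

import importlib

def extra_available(module_name: str) -> bool:
    # True when the optional dependency behind an extra can be imported.
    try:
        importlib.import_module(module_name)
        return True
    except ImportError:
        return False

print(extra_available("llama_index.llms.gemini"))
print(extra_available("google.generativeai"))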
View file
@@ -24,6 +24,7 @@ snapshot_download(
     repo_id=settings().huggingface.embedding_hf_model_name,
     cache_dir=models_cache_path,
     local_dir=embedding_path,
+    token=settings().huggingface.access_token,
 )
 print("Embedding model downloaded!")

@@ -35,14 +36,17 @@ hf_hub_download(
     cache_dir=models_cache_path,
     local_dir=models_path,
     resume_download=resume_download,
+    token=settings().huggingface.access_token,
 )
 print("LLM model downloaded!")

 # Download Tokenizer
-print(f"Downloading tokenizer {settings().llm.tokenizer}")
-AutoTokenizer.from_pretrained(
-    pretrained_model_name_or_path=settings().llm.tokenizer,
-    cache_dir=models_cache_path,
-)
-print("Tokenizer downloaded!")
+if settings().llm.tokenizer:
+    print(f"Downloading tokenizer {settings().llm.tokenizer}")
+    AutoTokenizer.from_pretrained(
+        pretrained_model_name_or_path=settings().llm.tokenizer,
+        cache_dir=models_cache_path,
+        token=settings().huggingface.access_token,
+    )
+    print("Tokenizer downloaded!")
10
settings-gemini.yaml Normal file
View file
@@ -0,0 +1,10 @@
llm:
  mode: gemini

embedding:
  mode: gemini

gemini:
  api_key: ${GOOGLE_API_KEY:}
  model: models/gemini-pro
  embedding_model: models/embedding-001
View file
@@ -40,7 +40,8 @@ llm:
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  # Select your tokenizer. Llama-index tokenizer is the default.
+  # tokenizer: mistralai/Mistral-7B-Instruct-v0.2
   temperature: 0.1  # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)

 rag:

@@ -53,6 +54,13 @@
     model: cross-encoder/ms-marco-MiniLM-L-2-v2
     top_n: 1

+clickhouse:
+  host: localhost
+  port: 8443
+  username: admin
+  password: clickhouse
+  database: embeddings
+
 llamacpp:
   llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
   llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf

@@ -69,7 +77,7 @@ embedding:
 huggingface:
   embedding_hf_model_name: BAAI/bge-small-en-v1.5
-  access_token: ${HUGGINGFACE_TOKEN:}
+  access_token: ${HF_TOKEN:}

 vectorstore:
   database: qdrant

@@ -77,6 +85,11 @@ vectorstore:
 nodestore:
   database: simple

+milvus:
+  uri: local_data/private_gpt/milvus/milvus_local.db
+  collection_name: milvus_db
+  overwrite: false
+
 qdrant:
   path: local_data/private_gpt/qdrant

@@ -104,6 +117,7 @@ ollama:
   embedding_api_base: http://localhost:11434  # change if your embedding model runs on another ollama
   keep_alive: 5m
   request_timeout: 120.0
+  autopull_models: true

 azopenai:
   api_key: ${AZ_OPENAI_API_KEY:}

@@ -113,3 +127,8 @@ azopenai:
   api_version: "2023-05-15"
   embedding_model: text-embedding-ada-002
   llm_model: gpt-35-turbo
+
+gemini:
+  api_key: ${GOOGLE_API_KEY:}
+  model: models/gemini-pro
+  embedding_model: models/embedding-001