feat: Upgrade LlamaIndex to 0.10 (#1663)

* Extract optional dependencies into per-integration Poetry extras (see the import sketch after this list)

* Separate local mode into llms-llama-cpp and embeddings-huggingface extras for clarity

* Support Ollama embeddings (sketch below)

* Upgrade to LlamaIndex 0.10.14; remove legacy use of ServiceContext in ContextChatEngine (migration sketch below)

* Fix vector retriever filters (filter sketch below)
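The new extras map one-to-one onto LlamaIndex 0.10's split integration packages, so each backend is installed (e.g. `poetry install --extras "llms-llama-cpp embeddings-huggingface"`) and imported separately. A minimal sketch of the resulting import paths, assuming those two extras are installed; the model path and model name below are illustrative, not taken from this commit:

    # Sketch only: assumes the llms-llama-cpp and embeddings-huggingface extras.
    from llama_index.llms.llama_cpp import LlamaCPP
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    llm = LlamaCPP(model_path="models/mistral-7b-instruct.gguf")  # hypothetical local GGUF path
    embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")  # illustrative model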
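For the newly supported Ollama embeddings, a hedged sketch of using the integration package this commit adds as an optional dependency; it assumes an Ollama server on its default port, and the model name is an illustrative choice:

    # Sketch only: assumes the embeddings-ollama extra and a running Ollama server.
    from llama_index.embeddings.ollama import OllamaEmbedding

    embed_model = OllamaEmbedding(
        model_name="nomic-embed-text",      # illustrative; any embedding model pulled into Ollama
        base_url="http://localhost:11434",  # Ollama's default endpoint
    )
    vector = embed_model.get_text_embedding("What is PrivateGPT?")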
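On the ServiceContext removal: in LlamaIndex 0.10 the deprecated ServiceContext bundle is gone, and components such as the LLM are passed to engines directly. A sketch of the 0.10-style construction, where `retriever` and `llm` stand in for the app's real configured objects:

    # Sketch only: `retriever` and `llm` are placeholders for PrivateGPT's
    # configured BaseRetriever and LLM instances.
    from llama_index.core.chat_engine import ContextChatEngine
    from llama_index.core.memory import ChatMemoryBuffer

    chat_engine = ContextChatEngine.from_defaults(
        retriever=retriever,
        llm=llm,  # passed directly; no ServiceContext
        memory=ChatMemoryBuffer.from_defaults(token_limit=3900),
    )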
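And for the retriever-filter fix, a sketch of how metadata filters attach to a vector retriever in 0.10; the `doc_id` key and value are assumptions for illustration, not taken from this commit:

    # Sketch only: `index` is a placeholder for an existing VectorStoreIndex,
    # and the "doc_id" key/value pair is hypothetical.
    from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

    filters = MetadataFilters(filters=[ExactMatchFilter(key="doc_id", value="some-doc-id")])
    retriever = index.as_retriever(filters=filters, similarity_top_k=2)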
Iván Martínez 2024-03-06 17:51:30 +01:00 committed by GitHub
parent 12f3a39e8a
commit 45f05711eb
43 changed files with 1474 additions and 1396 deletions

pyproject.toml

@@ -1,25 +1,52 @@
 [tool.poetry]
 name = "private-gpt"
-version = "0.2.0"
+version = "0.4.0"
 description = "Private GPT"
 authors = ["Zylon <hi@zylon.ai>"]
 
 [tool.poetry.dependencies]
 python = ">=3.11,<3.12"
-fastapi = { extras = ["all"], version = "^0.103.1" }
-boto3 = "^1.28.56"
+# PrivateGPT
+fastapi = { extras = ["all"], version = "^0.110.0" }
+python-multipart = "^0.0.9"
 injector = "^0.21.0"
 pyyaml = "^6.0.1"
-python-multipart = "^0.0.6"
-pypdf = "^3.16.2"
-llama-index = { extras = ["local_models"], version = "0.9.3" }
-watchdog = "^3.0.0"
-qdrant-client = "^1.6.9"
-chromadb = {version = "^0.4.13", optional = true}
-asyncpg = {version = "^0.29.0", optional = true}
-pgvector = {version = "^0.2.5", optional = true}
-psycopg2-binary = {version = "^2.9.9", optional = true}
-sqlalchemy = {version = "^2.0.27", optional = true}
+watchdog = "^4.0.0"
+transformers = "^4.38.2"
+
+# LlamaIndex core libs
+llama-index-core = "^0.10.14"
+llama-index-readers-file = "^0.1.6"
+# Optional LlamaIndex integration libs
+llama-index-llms-llama-cpp = {version = "^0.1.3", optional = true}
+llama-index-llms-openai = {version = "^0.1.6", optional = true}
+llama-index-llms-openai-like = {version ="^0.1.3", optional = true}
+llama-index-llms-ollama = {version ="^0.1.2", optional = true}
+llama-index-embeddings-ollama = {version ="^0.1.2", optional = true}
+llama-index-embeddings-huggingface = {version ="^0.1.4", optional = true}
+llama-index-embeddings-openai = {version ="^0.1.6", optional = true}
+llama-index-vector-stores-qdrant = {version ="^0.1.3", optional = true}
+llama-index-vector-stores-chroma = {version ="^0.1.4", optional = true}
+llama-index-vector-stores-postgres = {version ="^0.1.2", optional = true}
+# Optional Sagemaker dependency
+boto3 = {version ="^1.34.51", optional = true}
+# Optional UI
+gradio = {version ="^4.19.2", optional = true}
+
+[tool.poetry.extras]
+ui = ["gradio"]
+llms-llama-cpp = ["llama-index-llms-llama-cpp"]
+llms-openai = ["llama-index-llms-openai"]
+llms-openai-like = ["llama-index-llms-openai-like"]
+llms-ollama = ["llama-index-llms-ollama"]
+llms-sagemaker = ["boto3"]
+embeddings-ollama = ["llama-index-embeddings-ollama"]
+embeddings-huggingface = ["llama-index-embeddings-huggingface"]
+embeddings-openai = ["llama-index-embeddings-openai"]
+embeddings-sagemaker = ["boto3"]
+vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]
+vector-stores-chroma = ["llama-index-vector-stores-chroma"]
+vector-stores-postgres = ["llama-index-vector-stores-postgres"]
 
 [tool.poetry.group.dev.dependencies]
 black = "^22"
@@ -31,26 +58,6 @@ ruff = "^0"
 pytest-asyncio = "^0.21.1"
 types-pyyaml = "^6.0.12.12"
-
-# Dependencies for gradio UI
-[tool.poetry.group.ui]
-optional = true
-[tool.poetry.group.ui.dependencies]
-gradio = "^4.19.0"
-
-[tool.poetry.group.local]
-optional = true
-[tool.poetry.group.local.dependencies]
-llama-cpp-python = "^0.2.23"
-numpy = "1.26.0"
-sentence-transformers = "^2.2.2"
-# https://stackoverflow.com/questions/76327419/valueerror-libcublas-so-0-9-not-found-in-the-system-path
-torch = ">=2.0.0, !=2.0.1, !=2.1.0"
-transformers = "^4.34.0"
-
-[tool.poetry.extras]
-chroma = ["chromadb"]
-pgvector = ["sqlalchemy", "pgvector", "psycopg2-binary", "asyncpg"]
 
 [build-system]
 requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"
@@ -143,6 +150,9 @@ explicit_package_bases = true
 warn_unused_ignores = false
 exclude = ["tests"]
 
+[tool.mypy-llama-index]
+ignore_missing_imports = true
+
 [tool.pytest.ini_options]
 asyncio_mode = "auto"
 testpaths = ["tests"]