chore: install always ollama client

Javier Martinez 2024-07-29 11:37:46 +02:00
parent c096a42aa1
commit 47f4524fe9
2 changed files with 8 additions and 5 deletions

poetry.lock (generated)

@@ -6854,13 +6854,13 @@ cffi = ["cffi (>=1.11)"]
 embeddings-azopenai = ["llama-index-embeddings-azure-openai"]
 embeddings-gemini = ["llama-index-embeddings-gemini"]
 embeddings-huggingface = ["llama-index-embeddings-huggingface"]
-embeddings-ollama = ["llama-index-embeddings-ollama"]
+embeddings-ollama = ["llama-index-embeddings-ollama", "ollama"]
 embeddings-openai = ["llama-index-embeddings-openai"]
 embeddings-sagemaker = ["boto3"]
 llms-azopenai = ["llama-index-llms-azure-openai"]
 llms-gemini = ["google-generativeai", "llama-index-llms-gemini"]
 llms-llama-cpp = ["llama-index-llms-llama-cpp"]
-llms-ollama = ["llama-index-llms-ollama"]
+llms-ollama = ["llama-index-llms-ollama", "ollama"]
 llms-openai = ["llama-index-llms-openai"]
 llms-openai-like = ["llama-index-llms-openai-like"]
 llms-sagemaker = ["boto3"]
@@ -6876,4 +6876,4 @@ vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.11,<3.12"
-content-hash = "570af23ceef0f40e5e497a7e7b46a3ddfda37b9f9dd17f71669e5cb3aac1cd55"
+content-hash = "77b51b8858f22cdbc13736d141a39cc84e30cd570ab802f2597969927fe9530d"
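The new content-hash reflects regenerating the lock file after the dependency change in pyproject.toml below; assuming a Poetry 1.x checkout, that is typically done with:

    poetry lock --no-update

which re-resolves the edited extras without bumping any already pinned versions.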

pyproject.toml

@@ -60,16 +60,19 @@ gradio = {version ="^4.37.2", optional = true}
 # Optional Google Gemini dependency
 google-generativeai = {version ="^0.5.4", optional = true}
+# Optional Ollama client
+ollama = {version ="^0.3.0", optional = true}
 [tool.poetry.extras]
 ui = ["gradio"]
 llms-llama-cpp = ["llama-index-llms-llama-cpp"]
 llms-openai = ["llama-index-llms-openai"]
 llms-openai-like = ["llama-index-llms-openai-like"]
-llms-ollama = ["llama-index-llms-ollama"]
+llms-ollama = ["llama-index-llms-ollama", "ollama"]
 llms-sagemaker = ["boto3"]
 llms-azopenai = ["llama-index-llms-azure-openai"]
 llms-gemini = ["llama-index-llms-gemini", "google-generativeai"]
-embeddings-ollama = ["llama-index-embeddings-ollama"]
+embeddings-ollama = ["llama-index-embeddings-ollama", "ollama"]
 embeddings-huggingface = ["llama-index-embeddings-huggingface"]
 embeddings-openai = ["llama-index-embeddings-openai"]
 embeddings-sagemaker = ["boto3"]
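With both Ollama extras now listing the ollama package, installing either of them should also pull in the Ollama Python client. A minimal sketch of the install step, assuming a Poetry-managed checkout of the project:

    poetry install --extras "llms-ollama embeddings-ollama"

The repeated-flag form `poetry install -E llms-ollama -E embeddings-ollama` selects the same extras.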