Mirror of https://github.com/zylon-ai/private-gpt.git (synced 2025-12-22 10:45:42 +01:00)
feat(llm): autopull ollama models (#2019)
Some checks failed
release-please / release-please (push) Waiting to run
tests / setup (push) Waiting to run
tests / ${{ matrix.quality-command }} (black) (push) Blocked by required conditions
tests / ${{ matrix.quality-command }} (mypy) (push) Blocked by required conditions
tests / ${{ matrix.quality-command }} (ruff) (push) Blocked by required conditions
tests / test (push) Blocked by required conditions
tests / all_checks_passed (push) Blocked by required conditions
publish docs / publish-docs (push) Has been cancelled
* chore: update ollama (llm)
* feat: allow autopulling of ollama models
* fix: mypy
* chore: always install the ollama client
* refactor: move the ollama connection check and model pull into utils
* docs: update the ollama config docs with autopull info
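The refactor above moves the Ollama connection check and model autopull into a shared utility built on the official ollama Python client that this commit adds as a dependency. Below is a minimal sketch of what such a helper could look like; the module layout, the default host, and the names check_connection and pull_model are illustrative assumptions, not necessarily the exact code in the repository.

# Hypothetical sketch of a utils helper for Ollama (names are illustrative).
# Verifies the server is reachable and pulls a model only when it is not
# already available locally.
from ollama import Client


def check_connection(client: Client) -> bool:
    # A cheap API call; if it fails, the Ollama server is not reachable.
    try:
        client.list()
        return True
    except Exception:
        return False


def pull_model(client: Client, model_name: str) -> None:
    # Skip the pull when the model (with or without an explicit tag) is
    # already present on the server.
    installed = {m["name"] for m in client.list().get("models", [])}
    if model_name in installed or f"{model_name}:latest" in installed:
        return
    # stream=True yields progress events while the model downloads.
    for progress in client.pull(model_name, stream=True):
        print(f"Pulling {model_name}: {progress.get('status', '')}")


if __name__ == "__main__":
    # Example: default local Ollama endpoint; adjust to your settings.
    client = Client(host="http://localhost:11434")
    if check_connection(client):
        pull_model(client, "llama3")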
This commit is contained in:
parent dabf556dae
commit 20bad17c98
8 changed files with 129 additions and 21 deletions
poetry.lock (generated, 33 lines changed)
@@ -2022,13 +2022,13 @@ test = ["Cython (>=0.29.24,<0.30.0)"]
 
 [[package]]
 name = "httpx"
-version = "0.25.2"
+version = "0.27.0"
 description = "The next generation HTTP client."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"},
-    {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"},
+    {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
+    {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
 ]
 
 [package.dependencies]
@@ -2625,17 +2625,18 @@ llama-index-core = ">=0.10.1,<0.11.0"
 
 [[package]]
 name = "llama-index-llms-ollama"
-version = "0.1.5"
+version = "0.2.2"
 description = "llama-index llms ollama integration"
 optional = true
 python-versions = "<4.0,>=3.8.1"
 files = [
-    {file = "llama_index_llms_ollama-0.1.5-py3-none-any.whl", hash = "sha256:8e237978765458c9b175d2e25fc25162df8dc70a538b1b9ef9ea18617f8cdf5a"},
-    {file = "llama_index_llms_ollama-0.1.5.tar.gz", hash = "sha256:75697d96c860d87e80cce90c9ea425cbd236918458e0feaaee03597068ba9844"},
+    {file = "llama_index_llms_ollama-0.2.2-py3-none-any.whl", hash = "sha256:c224d7c17d641045bc9b6a6681dab434c1c421af0bacb5825eea444fefd8ed78"},
+    {file = "llama_index_llms_ollama-0.2.2.tar.gz", hash = "sha256:0c7f192cb8b768707bd5154b97e2a41284732d62070eb76190dee125e95245ea"},
 ]
 
 [package.dependencies]
 llama-index-core = ">=0.10.1,<0.11.0"
+ollama = ">=0.3.0"
 
 [[package]]
 name = "llama-index-llms-openai"
@@ -3633,6 +3634,20 @@ rsa = ["cryptography (>=3.0.0)"]
 signals = ["blinker (>=1.4.0)"]
 signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
 
+[[package]]
+name = "ollama"
+version = "0.3.0"
+description = "The official Python client for Ollama."
+optional = true
+python-versions = "<4.0,>=3.8"
+files = [
+    {file = "ollama-0.3.0-py3-none-any.whl", hash = "sha256:cd7010c4e2a37d7f08f36cd35c4592b14f1ec0d1bf3df10342cd47963d81ad7a"},
+    {file = "ollama-0.3.0.tar.gz", hash = "sha256:6ff493a2945ba76cdd6b7912a1cd79a45cfd9ba9120d14adeb63b2b5a7f353da"},
+]
+
+[package.dependencies]
+httpx = ">=0.27.0,<0.28.0"
+
 [[package]]
 name = "onnxruntime"
 version = "1.17.1"
@@ -6844,13 +6859,13 @@ cffi = ["cffi (>=1.11)"]
 embeddings-azopenai = ["llama-index-embeddings-azure-openai"]
 embeddings-gemini = ["llama-index-embeddings-gemini"]
 embeddings-huggingface = ["llama-index-embeddings-huggingface"]
-embeddings-ollama = ["llama-index-embeddings-ollama"]
+embeddings-ollama = ["llama-index-embeddings-ollama", "ollama"]
 embeddings-openai = ["llama-index-embeddings-openai"]
 embeddings-sagemaker = ["boto3"]
 llms-azopenai = ["llama-index-llms-azure-openai"]
 llms-gemini = ["google-generativeai", "llama-index-llms-gemini"]
 llms-llama-cpp = ["llama-index-llms-llama-cpp"]
-llms-ollama = ["llama-index-llms-ollama"]
+llms-ollama = ["llama-index-llms-ollama", "ollama"]
 llms-openai = ["llama-index-llms-openai"]
 llms-openai-like = ["llama-index-llms-openai-like"]
 llms-sagemaker = ["boto3"]
@@ -6866,4 +6881,4 @@ vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.11,<3.12"
-content-hash = "66edb004ccbe7915d68567ea31a7851c87717185e2b504048cc645d1d511a66e"
+content-hash = "dce5b88d92bcfa047bf1e4c9fe2dbb9c63eb864d6bbca2340801ac0a2f02a8d4"
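Note that with the extras change above, installing either the llms-ollama or the embeddings-ollama extra now also brings in the ollama client package, so the connection check and autopull sketched earlier have their dependency available without a separate install step.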