Merge branch 'main' into feat/llama3.1
# Conflicts:
#	Dockerfile.external
#	Dockerfile.local
commit f3ee5f8e66
4 changed files with 48 additions and 22 deletions
@@ -14,27 +14,38 @@ FROM base as dependencies
WORKDIR /home/worker/app
COPY pyproject.toml poetry.lock ./

RUN poetry install --extras "ui vector-stores-qdrant llms-ollama embeddings-ollama"
ARG POETRY_EXTRAS="ui vector-stores-qdrant llms-ollama embeddings-ollama"
RUN poetry install --no-root --extras "${POETRY_EXTRAS}"

FROM base as app

ENV PYTHONUNBUFFERED=1
ENV PORT=8080
ENV APP_ENV=prod
ENV PYTHONPATH="$PYTHONPATH:/home/worker/app/private_gpt/"
EXPOSE 8080

# Prepare a non-root user
RUN adduser --system worker
# More info about how to configure UIDs and GIDs in Docker:
# https://github.com/systemd/systemd/blob/main/docs/UIDS-GIDS.md

# Define the User ID (UID) for the non-root user
# UID 100 is chosen to avoid conflicts with existing system users
ARG UID=100

# Define the Group ID (GID) for the non-root user
# GID 65534 is often used for the 'nogroup' or 'nobody' group
ARG GID=65534

RUN adduser --system --gid ${GID} --uid ${UID} --home /home/worker worker
WORKDIR /home/worker/app

RUN mkdir local_data; chown worker local_data
RUN mkdir models; chown worker models
RUN chown worker /home/worker/app
RUN mkdir local_data && chown worker local_data
RUN mkdir models && chown worker models
COPY --chown=worker --from=dependencies /home/worker/app/.venv/ .venv
COPY --chown=worker private_gpt/ private_gpt
COPY --chown=worker fern/ fern
COPY --chown=worker *.yaml *.md ./
COPY --chown=worker *.yaml .
COPY --chown=worker scripts/ scripts

ENV PYTHONPATH="$PYTHONPATH:/private_gpt/"

USER worker
ENTRYPOINT python -m private_gpt
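
The new POETRY_EXTRAS build argument lets the dependency stage be re-targeted at build time instead of editing the RUN line. A minimal sketch, assuming this hunk belongs to Dockerfile.external (one of the files named in the conflicts) and using hypothetical image tags:

# default extras baked into the ARG
docker build -f Dockerfile.external -t private-gpt:ollama .
# override the extras without touching the Dockerfile
docker build -f Dockerfile.external -t private-gpt:llamacpp \
  --build-arg POETRY_EXTRAS="ui embeddings-huggingface llms-llama-cpp vector-stores-qdrant" .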
@@ -24,28 +24,39 @@ FROM base as dependencies
WORKDIR /home/worker/app
COPY pyproject.toml poetry.lock ./

RUN poetry install --extras "ui embeddings-huggingface llms-llama-cpp vector-stores-qdrant"
ARG POETRY_EXTRAS="ui embeddings-huggingface llms-llama-cpp vector-stores-qdrant"
RUN poetry install --no-root --extras "${POETRY_EXTRAS}"

FROM base as app

ENV PYTHONUNBUFFERED=1
ENV PORT=8080
ENV APP_ENV=prod
ENV PYTHONPATH="$PYTHONPATH:/home/worker/app/private_gpt/"
EXPOSE 8080

# Prepare a non-root user
RUN adduser --group worker
RUN adduser --system --ingroup worker worker
# More info about how to configure UIDs and GIDs in Docker:
# https://github.com/systemd/systemd/blob/main/docs/UIDS-GIDS.md

# Define the User ID (UID) for the non-root user
# UID 100 is chosen to avoid conflicts with existing system users
ARG UID=100

# Define the Group ID (GID) for the non-root user
# GID 65534 is often used for the 'nogroup' or 'nobody' group
ARG GID=65534

RUN adduser --system --gid ${GID} --uid ${UID} --home /home/worker worker
WORKDIR /home/worker/app

RUN mkdir local_data; chown worker local_data
RUN mkdir models; chown worker models
RUN chown worker /home/worker/app
RUN mkdir local_data && chown worker local_data
RUN mkdir models && chown worker models
COPY --chown=worker --from=dependencies /home/worker/app/.venv/ .venv
COPY --chown=worker private_gpt/ private_gpt
COPY --chown=worker fern/ fern
COPY --chown=worker *.yaml *.md ./
COPY --chown=worker *.yaml ./
COPY --chown=worker scripts/ scripts

ENV PYTHONPATH="$PYTHONPATH:/private_gpt/"

USER worker
ENTRYPOINT python -m private_gpt
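
Creating the worker user with explicit UID/GID arguments also makes it possible to line the container user up with the host user, so bind mounts such as ./local_data and ./models stay writable from both sides. A sketch, assuming the chosen IDs do not collide with users or groups already present in the base image:

docker build -f Dockerfile.local -t private-gpt:local \
  --build-arg UID=$(id -u) .
# note: adduser --gid expects the group to already exist in the image,
# so keep the GID default (65534, nogroup) unless you know your host GID is present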
@@ -5,12 +5,15 @@ services:
    volumes:
      - ./local_data/:/home/worker/app/local_data
    ports:
      - 8001:8080
      - 8001:8001
    environment:
      PORT: 8080
      PORT: 8001
      PGPT_PROFILES: docker
      PGPT_MODE: ollama
      PGPT_EMBED_MODE: ollama
  ollama:
    image: ollama/ollama:latest
    ports:
      - 11434:11434
    volumes:
      - ./models:/root/.ollama
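
With PORT moved to 8001 and the mapping changed from 8001:8080 to 8001:8001, the published port and the in-container port now match. A quick smoke test, assuming the compose file sits in the repository root and the API service is named private-gpt:

docker compose up --build -d
curl http://localhost:8001/health   # assumption: the health endpoint is exposed at /health
docker compose logs -f private-gpt  # assumption: service name in this compose file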
@@ -6,7 +6,7 @@ llm:
  mode: ${PGPT_MODE:mock}

embedding:
  mode: ${PGPT_MODE:sagemaker}
  mode: ${PGPT_EMBED_MODE:mock}

llamacpp:
  llm_hf_repo_id: ${PGPT_HF_REPO_ID:lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF}
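
The ${NAME:default} entries in this settings file are expanded by private-gpt's settings loader from environment variables, falling back to the text after the colon, so the embedding mode now follows its own PGPT_EMBED_MODE variable instead of piggybacking on PGPT_MODE. A sketch of overriding both for a one-off container, assuming the service name used above:

docker compose run --rm \
  -e PGPT_PROFILES=docker \
  -e PGPT_MODE=ollama \
  -e PGPT_EMBED_MODE=ollama \
  private-gpt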
@@ -30,6 +30,7 @@ ollama:
  repeat_last_n: ${PGPT_OLLAMA_REPEAT_LAST_N:64}
  repeat_penalty: ${PGPT_OLLAMA_REPEAT_PENALTY:1.2}
  request_timeout: ${PGPT_OLLAMA_REQUEST_TIMEOUT:600.0}
  autopull_models: ${PGPT_OLLAMA_AUTOPULL_MODELS:true}

ui:
  enabled: true
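
This hunk adds a single line to the ollama block; with autopull_models reading PGPT_OLLAMA_AUTOPULL_MODELS (default true), the Ollama profile will try to pull any missing model on startup. A sketch of turning that off, for example when ./models is pre-seeded, assuming the same env-substitution mechanism and service name as above; the values are illustrative:

docker compose run --rm \
  -e PGPT_OLLAMA_AUTOPULL_MODELS=false \
  -e PGPT_OLLAMA_REQUEST_TIMEOUT=900.0 \
  private-gpt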