Mirror of https://github.com/zylon-ai/private-gpt.git (synced 2025-12-22 04:30:11 +01:00)
Added dockerfile and docker-compose for fireworks
parent 241637f0c4
commit 4d2254648c
2 changed files with 75 additions and 4 deletions
Dockerfile.fireworks (new file, 54 lines)
@@ -0,0 +1,54 @@
FROM python:3.11.6-slim-bookworm as base

# Install poetry
RUN pip install pipx
RUN python3 -m pipx ensurepath
RUN pipx install poetry==1.8.3
ENV PATH="/root/.local/bin:$PATH"
ENV PATH=".venv/bin/:$PATH"

RUN apt update && apt install -y \
    build-essential

# https://python-poetry.org/docs/configuration/#virtualenvsin-project
ENV POETRY_VIRTUALENVS_IN_PROJECT=true

FROM base as dependencies
WORKDIR /home/worker/app
COPY pyproject.toml poetry.lock ./

ARG POETRY_EXTRAS="ui llms-fireworks embeddings-fireworks vector-stores-qdrant embeddings-openai"
RUN poetry install --no-root --extras "${POETRY_EXTRAS}"

FROM base as app
ENV PYTHONUNBUFFERED=1
ENV PORT=8080
ENV APP_ENV=prod
ENV PYTHONPATH="$PYTHONPATH:/home/worker/app/private_gpt/"
EXPOSE 8080

# Prepare a non-root user
# More info about how to configure UIDs and GIDs in Docker:
# https://github.com/systemd/systemd/blob/main/docs/UIDS-GIDS.md

# Define the User ID (UID) for the non-root user
# UID 100 is chosen to avoid conflicts with existing system users
ARG UID=100

# Define the Group ID (GID) for the non-root user
# GID 65534 is often used for the 'nogroup' or 'nobody' group
ARG GID=65534

RUN adduser --system --gid ${GID} --uid ${UID} --home /home/worker worker
WORKDIR /home/worker/app

RUN chown worker /home/worker/app
RUN mkdir local_data && chown worker local_data
RUN mkdir models && chown worker models
COPY --chown=worker --from=dependencies /home/worker/app/.venv/ .venv
COPY --chown=worker private_gpt/ private_gpt
COPY --chown=worker *.yaml .
COPY --chown=worker scripts/ scripts

USER worker
ENTRYPOINT python -m private_gpt
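A minimal usage sketch for this image, not part of the commit: it assumes the repository root as build context, and the tag name private-gpt-fireworks, the PGPT_PROFILES=fireworks variable, and the placeholder API key are illustrative assumptions that mirror the compose service below.

# Sketch only: build the image from Dockerfile.fireworks (tag name is an assumption)
docker build -f Dockerfile.fireworks -t private-gpt-fireworks .
# Sketch only: run it on port 8080; PGPT_PROFILES and FIREWORKS_API_KEY mirror the compose service below
docker run --rm -p 8080:8080 -e PGPT_PROFILES=fireworks -e FIREWORKS_API_KEY=<your-key> private-gpt-fireworks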
docker-compose.yaml (21 additions, 4 deletions)
@@ -1,5 +1,4 @@
services:

  #-----------------------------------
  #---- Private-GPT services ---------
  #-----------------------------------
@@ -7,7 +6,7 @@ services:
  # Private-GPT service for the Ollama CPU and GPU modes
  # This service builds from an external Dockerfile and runs the Ollama mode.
  private-gpt-ollama:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama # x-release-please-version
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama # x-release-please-version
    build:
      context: .
      dockerfile: Dockerfile.ollama
@@ -80,7 +79,7 @@ services:
  ollama-cpu:
    image: ollama/ollama:latest
    volumes:
-      - ./models:/root/.ollama
+      - ./local_data:/root/.ollama
    profiles:
      - ""
      - ollama-cpu
@@ -98,4 +97,22 @@ services:
              count: 1
              capabilities: [gpu]
    profiles:
-      - ollama-cuda
+      - ollama-cuda
+
+  # fireworks service
+  private-gpt-fireworks:
+    build:
+      context: .
+      dockerfile: Dockerfile.fireworks
+    volumes:
+      - ./local_data/:/home/worker/app/local_data
+    ports:
+      - "3001:8080"
+    environment:
+      PORT: 8080
+      PGPT_PROFILES: fireworks
+      FIREWORKS_API_KEY: ${FIREWORKS_API_KEY}
+    env_file:
+      - .env
+    profiles:
+      - fireworks
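A minimal usage sketch for the new service, not part of the commit, assuming FIREWORKS_API_KEY is defined in the .env file the service reads; the ports mapping above exposes the app on the host at port 3001.

# Sketch only: build and start the fireworks service through its compose profile,
# then reach the UI on the host at http://localhost:3001 per the ports mapping above.
docker compose --profile fireworks up --build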