From 9cf972563e3ee5524b1d4ec2d46fa883d03803b0 Mon Sep 17 00:00:00 2001 From: Max Zangs Date: Fri, 8 Dec 2023 10:34:12 +0100 Subject: [PATCH 001/127] Add setup option to Makefile (#1368) --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index be062ce..a2e2d8d 100644 --- a/Makefile +++ b/Makefile @@ -52,4 +52,7 @@ ingest: @poetry run python scripts/ingest_folder.py $(call args) wipe: - poetry run python scripts/utils.py wipe \ No newline at end of file + poetry run python scripts/utils.py wipe + +setup: + poetry run python scripts/setup From 9302620eaca56d00818cb4db87ea1e8a8aa170f9 Mon Sep 17 00:00:00 2001 From: EEmlan <70259905+EEmlan@users.noreply.github.com> Date: Fri, 8 Dec 2023 11:26:25 +0100 Subject: [PATCH 002/127] Adding german speaking model to documentation (#1374) --- fern/docs/pages/recipes/list-llm.mdx | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/fern/docs/pages/recipes/list-llm.mdx b/fern/docs/pages/recipes/list-llm.mdx index 19ae479..2cb80e4 100644 --- a/fern/docs/pages/recipes/list-llm.mdx +++ b/fern/docs/pages/recipes/list-llm.mdx @@ -92,4 +92,14 @@ local: llm_hf_model_file: godzilla2-70b.Q4_K_M.gguf embedding_hf_model_name: BAAI/bge-large-en prompt_style: "llama2" -``` \ No newline at end of file +``` +### German speaking model +`settings-de.yaml`: +```yml +local: + llm_hf_repo_id: TheBloke/em_german_leo_mistral-GGUF + llm_hf_model_file: em_german_leo_mistral.Q4_K_M.gguf + embedding_hf_model_name: T-Systems-onsite/german-roberta-sentence-transformer-v2 + #llama, default or tag + prompt_style: "default" +``` From f235c50be925d47e62334187037f1d2838e8489d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Mart=C3=ADnez?= Date: Fri, 8 Dec 2023 22:39:23 +0100 Subject: [PATCH 003/127] Delete old docs (#1384) --- docs/.nojekyll | 0 docs/description.md | 474 ------------------ docs/index.html | 22 - docs/logo.png | Bin 2658 -> 0 bytes docs/openapi.json | 989 -------------------------------------- fern/openapi/openapi.json | 59 +-- private_gpt/launcher.py | 127 +---- 7 files changed, 35 insertions(+), 1636 deletions(-) delete mode 100644 docs/.nojekyll delete mode 100644 docs/description.md delete mode 100644 docs/index.html delete mode 100644 docs/logo.png delete mode 100644 docs/openapi.json diff --git a/docs/.nojekyll b/docs/.nojekyll deleted file mode 100644 index e69de29..0000000 diff --git a/docs/description.md b/docs/description.md deleted file mode 100644 index 024bf7b..0000000 --- a/docs/description.md +++ /dev/null @@ -1,474 +0,0 @@ -## Introduction - -PrivateGPT provides an **API** containing all the building blocks required to build -**private, context-aware AI applications**. The API follows and extends OpenAI API standard, and supports -both normal and streaming responses. - -The API is divided in two logical blocks: - -- High-level API, abstracting all the complexity of a RAG (Retrieval Augmented Generation) pipeline implementation: - - Ingestion of documents: internally managing document parsing, splitting, metadata extraction, - embedding generation and storage. - - Chat & Completions using context from ingested documents: abstracting the retrieval of context, the prompt - engineering and the response generation. -- Low-level API, allowing advanced users to implement their own complex pipelines: - - Embeddings generation: based on a piece of text. - - Contextual chunks retrieval: given a query, returns the most relevant chunks of text from the ingested - documents. 
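
As a quick illustration, a context-aware chat completion request might look like the following sketch, assuming the server is running on the default `http://localhost:8001` and using the request fields defined by the `/v1/chat/completions` schema (`messages`, `use_context`, `include_sources`, `stream`):

```bash
# Minimal sketch: ask a question using context from previously ingested documents.
# Assumes the default local server address (http://localhost:8001).
curl http://localhost:8001/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "messages": [{"role": "user", "content": "How do you fry an egg?"}],
        "use_context": true,
        "include_sources": true,
        "stream": false
      }'
```

Setting `"stream": true` in the same request returns data chunks following OpenAI's streaming format instead of a single response.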
- -> A working **Gradio UI client** is provided to test the API, together with a set of -> useful tools such as bulk model download script, ingestion script, documents folder -> watch, etc. - -## Quick Local Installation steps - -The steps in `Installation and Settings` section are better explained and cover more -setup scenarios. But if you are looking for a quick setup guide, here it is: - -``` -# Clone the repo -git clone https://github.com/imartinez/privateGPT -cd privateGPT - -# Install Python 3.11 -pyenv install 3.11 -pyenv local 3.11 - -# Install dependencies -poetry install --with ui,local - -# Download Embedding and LLM models -poetry run python scripts/setup - -# (Optional) For Mac with Metal GPU, enable it. Check Installation and Settings section -to know how to enable GPU on other platforms -CMAKE_ARGS="-DLLAMA_METAL=on" pip install --force-reinstall --no-cache-dir llama-cpp-python - -# Run the local server -PGPT_PROFILES=local make run - -# Note: on Mac with Metal you should see a ggml_metal_add_buffer log, stating GPU is -being used - -# Navigate to the UI and try it out! -http://localhost:8001/ -``` - -## Installation and Settings - -### Base requirements to run PrivateGPT - -* Git clone PrivateGPT repository, and navigate to it: - -``` - git clone https://github.com/imartinez/privateGPT - cd privateGPT -``` - -* Install Python 3.11. Ideally through a python version manager like `pyenv`. - Python 3.12 - should work too. Earlier python versions are not supported. - * osx/linux: [pyenv](https://github.com/pyenv/pyenv) - * windows: [pyenv-win](https://github.com/pyenv-win/pyenv-win) - -``` -pyenv install 3.11 -pyenv local 3.11 -``` - -* Install [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) for dependency management: - -* Have a valid C++ compiler like gcc. See [Troubleshooting: C++ Compiler](#troubleshooting-c-compiler) for more details. - -* Install `make` for scripts: - * osx: (Using homebrew): `brew install make` - * windows: (Using chocolatey) `choco install make` - -### Install dependencies - -Install the dependencies: - -```bash -poetry install --with ui -``` - -Verify everything is working by running `make run` (or `poetry run python -m private_gpt`) and navigate to -http://localhost:8001. You should see a [Gradio UI](https://gradio.app/) **configured with a mock LLM** that will -echo back the input. Later we'll see how to configure a real LLM. - -### Settings - -> Note: the default settings of PrivateGPT work out-of-the-box for a 100% local setup. Skip this section if you just -> want to test PrivateGPT locally, and come back later to learn about more configuration options. - -PrivateGPT is configured through *profiles* that are defined using yaml files, and selected through env variables. -The full list of properties configurable can be found in `settings.yaml` - -#### env var `PGPT_SETTINGS_FOLDER` - -The location of the settings folder. Defaults to the root of the project. -Should contain the default `settings.yaml` and any other `settings-{profile}.yaml`. - -#### env var `PGPT_PROFILES` - -By default, the profile definition in `settings.yaml` is loaded. -Using this env var you can load additional profiles; format is a comma separated list of profile names. -This will merge `settings-{profile}.yaml` on top of the base settings file. 
- -For example: -`PGPT_PROFILES=local,cuda` will load `settings-local.yaml` -and `settings-cuda.yaml`, their contents will be merged with -later profiles properties overriding values of earlier ones like `settings.yaml`. - -During testing, the `test` profile will be active along with the default, therefore `settings-test.yaml` -file is required. - -#### Environment variables expansion - -Configuration files can contain environment variables, -they will be expanded at runtime. - -Expansion must follow the pattern `${VARIABLE_NAME:default_value}`. - -For example, the following configuration will use the value of the `PORT` -environment variable or `8001` if it's not set. -Missing variables with no default will produce an error. - -```yaml -server: - port: ${PORT:8001} -``` - -### Local LLM requirements - -Install extra dependencies for local execution: - -```bash -poetry install --with local -``` - -For PrivateGPT to run fully locally GPU acceleration is required -(CPU execution is possible, but very slow), however, -typical Macbook laptops or window desktops with mid-range GPUs lack VRAM to run -even the smallest LLMs. For that reason -**local execution is only supported for models compatible with [llama.cpp](https://github.com/ggerganov/llama.cpp)** - -These two models are known to work well: - -* https://huggingface.co/TheBloke/Llama-2-7B-chat-GGUF -* https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF (recommended) - -To ease the installation process, use the `setup` script that will download both -the embedding and the LLM model and place them in the correct location (under `models` folder): - -```bash -poetry run python scripts/setup -``` - -If you are ok with CPU execution, you can skip the rest of this section. - -As stated before, llama.cpp is required and in -particular [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) -is used. - -> It's highly encouraged that you fully read llama-cpp and llama-cpp-python documentation relevant to your platform. -> Running into installation issues is very likely, and you'll need to troubleshoot them yourself. - -#### Customizing low level parameters - -Currently not all the parameters of llama-cpp and llama-cpp-python are available at PrivateGPT's `settings.yaml` file. In case you need to customize parameters such as the number of layers loaded into the GPU, you might change these at the `llm_component.py` file under the `private_gpt/components/llm/llm_component.py`. If you are getting an out of memory error, you might also try a smaller model or stick to the proposed recommended models, instead of custom tuning the parameters. - -#### OSX GPU support - -You will need to build [llama.cpp](https://github.com/ggerganov/llama.cpp) with -metal support. To do that run: - -```bash -CMAKE_ARGS="-DLLAMA_METAL=on" pip install --force-reinstall --no-cache-dir llama-cpp-python -``` - -#### Windows NVIDIA GPU support - -Windows GPU support is done through CUDA. -Follow the instructions on the original [llama.cpp](https://github.com/ggerganov/llama.cpp) repo to install the required -dependencies. - -Some tips to get it working with an NVIDIA card and CUDA (Tested on Windows 10 with CUDA 11.5 RTX 3070): - -* Install latest VS2022 (and build tools) https://visualstudio.microsoft.com/vs/community/ -* Install CUDA toolkit https://developer.nvidia.com/cuda-downloads -* Verify your installation is correct by running `nvcc --version` and `nvidia-smi`, ensure your CUDA version is up to - date and your GPU is detected. 
-* [Optional] Install CMake to troubleshoot building issues by compiling llama.cpp directly https://cmake.org/download/ - -If you have all required dependencies properly configured running the -following powershell command should succeed. - -```powershell -$env:CMAKE_ARGS='-DLLAMA_CUBLAS=on'; poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python -``` - -If your installation was correct, you should see a message similar to the following next -time you start the server `BLAS = 1`. - -``` -llama_new_context_with_model: total VRAM used: 4857.93 MB (model: 4095.05 MB, context: 762.87 MB) -AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 0 | VSX = 0 | -``` - -Note that llama.cpp offloads matrix calculations to the GPU but the performance is -still hit heavily due to latency between CPU and GPU communication. You might need to tweak -batch sizes and other parameters to get the best performance for your particular system. - -#### Linux NVIDIA GPU support and Windows-WSL - -Linux GPU support is done through CUDA. -Follow the instructions on the original [llama.cpp](https://github.com/ggerganov/llama.cpp) repo to install the required -external -dependencies. - -Some tips: - -* Make sure you have an up-to-date C++ compiler -* Install CUDA toolkit https://developer.nvidia.com/cuda-downloads -* Verify your installation is correct by running `nvcc --version` and `nvidia-smi`, ensure your CUDA version is up to - date and your GPU is detected. - -After that running the following command in the repository will install llama.cpp with GPU support: - -` -CMAKE_ARGS='-DLLAMA_CUBLAS=on' poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python -` - -If your installation was correct, you should see a message similar to the following next -time you start the server `BLAS = 1`. - -``` -llama_new_context_with_model: total VRAM used: 4857.93 MB (model: 4095.05 MB, context: 762.87 MB) -AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 0 | VSX = 0 | -``` - -#### Vectorstores -PrivateGPT supports [Chroma](https://www.trychroma.com/), [Qdrant](https://qdrant.tech/) as vectorstore providers. Chroma being the default. - -To enable Qdrant, set the `vectorstore.database` property in the `settings.yaml` file to `qdrant` and install the `qdrant` extra. - -```bash -poetry install --extras qdrant -``` - -By default Qdrant tries to connect to an instance at `http://localhost:3000`. - -Qdrant settings can be configured by setting values to the `qdrant` property in the `settings.yaml` file. - -The available configuration options are: -| Field | Description | -|--------------|-------------| -| location | If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.|
| url | Either host or str of 'Optional[scheme], host, Optional[port], Optional[prefix]'. Eg. `http://localhost:6333` |
| port | Port of the REST API interface. Default: `6333` |
| grpc_port | Port of the gRPC interface. Default: `6334` |
| prefer_grpc | If `true` - use gRPC interface whenever possible in custom methods. |
| https | If `true` - use HTTPS(SSL) protocol.|
| api_key | API key for authentication in Qdrant Cloud.|
| prefix | If set, add `prefix` to the REST URL path. Example: `service/v1` will result in `http://localhost:6333/service/v1/{qdrant-endpoint}` for REST API.|
| timeout | Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC | -| host | Host name of Qdrant service. If url and host are not set, defaults to 'localhost'.| -| path | Persistence path for QdrantLocal. Eg. `local_data/private_gpt/qdrant`| -| force_disable_check_same_thread | Force disable check_same_thread for QdrantLocal sqlite connection.| - -#### Known issues and Troubleshooting - -Execution of LLMs locally still has a lot of sharp edges, specially when running on non Linux platforms. -You might encounter several issues: - -* Performance: RAM or VRAM usage is very high, your computer might experience slowdowns or even crashes. -* GPU Virtualization on Windows and OSX: Simply not possible with docker desktop, you have to run the server directly on - the host. -* Building errors: Some of PrivateGPT dependencies need to build native code, and they might fail on some platforms. - Most likely you are missing some dev tools in your machine (updated C++ compiler, CUDA is not on PATH, etc.). - If you encounter any of these issues, please open an issue and we'll try to help. - -#### Troubleshooting: C++ Compiler - -If you encounter an error while building a wheel during the `pip install` process, you may need to install a C++ -compiler on your computer. - -**For Windows 10/11** - -To install a C++ compiler on Windows 10/11, follow these steps: - -1. Install Visual Studio 2022. -2. Make sure the following components are selected: - * Universal Windows Platform development - * C++ CMake tools for Windows -3. Download the MinGW installer from the [MinGW website](https://sourceforge.net/projects/mingw/). -4. Run the installer and select the `gcc` component. - -** For OSX ** - -1. Check if you have a C++ compiler installed, Xcode might have done it for you. for example running `gcc`. -2. If not, you can install clang or gcc with homebrew `brew install gcc` - -#### Troubleshooting: Mac Running Intel - -When running a Mac with Intel hardware (not M1), you may run into _clang: error: the clang compiler does not support ' --march=native'_ during pip install. - -If so set your archflags during pip install. eg: _ARCHFLAGS="-arch x86_64" pip3 install -r requirements.txt_ - -## Running the Server - -After following the installation steps you should be ready to go. Here are some common run setups: - -### Running 100% locally - -Make sure you have followed the *Local LLM requirements* section before moving on. - -This command will start PrivateGPT using the `settings.yaml` (default profile) together with the `settings-local.yaml` -configuration files. By default, it will enable both the API and the Gradio UI. Run: - -``` -PGPT_PROFILES=local make run -``` - -or - -``` -PGPT_PROFILES=local poetry run python -m private_gpt -``` - -When the server is started it will print a log *Application startup complete*. -Navigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API -using Swagger UI. - -### Local server using OpenAI as LLM - -If you cannot run a local model (because you don't have a GPU, for example) or for testing purposes, you may -decide to run PrivateGPT using OpenAI as the LLM. 
- -In order to do so, create a profile `settings-openai.yaml` with the following contents: - -```yaml -llm: - mode: openai - -openai: - api_key: # You could skip this configuration and use the OPENAI_API_KEY env var instead -``` - -And run PrivateGPT loading that profile you just created: - -```PGPT_PROFILES=openai make run``` - -or - -```PGPT_PROFILES=openai poetry run python -m private_gpt``` - -> Note this will still use the local Embeddings model, as it is ok to use it on a CPU. -> We'll support using OpenAI embeddings in a future release. - -When the server is started it will print a log *Application startup complete*. -Navigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API. -You'll notice the speed and quality of response is higher, given you are using OpenAI's servers for the heavy -computations. - -### Use AWS's Sagemaker - -🚧 Under construction 🚧 - -## Gradio UI user manual - -Gradio UI is a ready to use way of testing most of PrivateGPT API functionalities. - -![Gradio PrivateGPT](https://lh3.googleusercontent.com/drive-viewer/AK7aPaD_Hc-A8A9ooMe-hPgm_eImgsbxAjb__8nFYj8b_WwzvL1Gy90oAnp1DfhPaN6yGiEHCOXs0r77W1bYHtPzlVwbV7fMsA=s1600) - -### Execution Modes - -It has 3 modes of execution (you can select in the top-left): - -* Query Docs: uses the context from the - ingested documents to answer the questions posted in the chat. It also takes - into account previous chat messages as context. - * Makes use of `/chat/completions` API with `use_context=true` and no - `context_filter`. -* Search in Docs: fast search that returns the 4 most related text - chunks, together with their source document and page. - * Makes use of `/chunks` API with no `context_filter`, `limit=4` and - `prev_next_chunks=0`. -* LLM Chat: simple, non-contextual chat with the LLM. The ingested documents won't - be taken into account, only the previous messages. - * Makes use of `/chat/completions` API with `use_context=false`. - -### Document Ingestion - -Ingest documents by using the `Upload a File` button. You can check the progress of -the ingestion in the console logs of the server. - -The list of ingested files is shown below the button. - -If you want to delete the ingested documents, refer to *Reset Local documents -database* section in the documentation. - -### Chat - -Normal chat interface, self-explanatory ;) - -You can check the actual prompt being passed to the LLM by looking at the logs of -the server. We'll add better observability in future releases. - -## Deployment options - -🚧 We are working on Dockerized deployment guidelines 🚧 - -## Observability - -Basic logs are enabled using LlamaIndex -basic logging (for example ingestion progress or LLM prompts and answers). - -🚧 We are working on improved Observability. 🚧 - -## Ingesting & Managing Documents - -🚧 Document Update and Delete are still WIP. 🚧 - -The ingestion of documents can be done in different ways: - -* Using the `/ingest` API -* Using the Gradio UI -* Using the Bulk Local Ingestion functionality (check next section) - -### Bulk Local Ingestion - -When you are running PrivateGPT in a fully local setup, you can ingest a complete folder for convenience (containing -pdf, text files, etc.) 
-and optionally watch changes on it with the command: - -```bash -make ingest /path/to/folder -- --watch -``` - -To log the processed and failed files to an additional file, use: - -```bash -make ingest /path/to/folder -- --watch --log-file /path/to/log/file.log -``` - -After ingestion is complete, you should be able to chat with your documents -by navigating to http://localhost:8001 and using the option `Query documents`, -or using the completions / chat API. - -### Reset Local documents database - -When running in a local setup, you can remove all ingested documents by simply -deleting all contents of `local_data` folder (except .gitignore). - -To simplify this process, you can use the command: -```bash -make wipe -``` - -## API - -As explained in the introduction, the API contains high level APIs (ingestion and chat/completions) and low level APIs -(embeddings and chunk retrieval). In this section the different specific API calls are explained. diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index 5ed134d..0000000 --- a/docs/index.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - - PrivateGPT Docs - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/logo.png b/docs/logo.png deleted file mode 100644 index ec7164708a9ef1511d5f0c83baf28f120e3db274..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2658 zcmbVO_d6R37Y;RA)YjtGE@=={YsCmfP>?RIt7LlXNv~|#5P03`?cCoG-W(nMZGwlt-ikRfK9SV{Pu{_KbSma#hTl7KPiWEQ&NWWR78s)=UcUyXG3j!5c6x~OTqOS2>hrDeANHqp zqRD|Cpy&}&fMlzi9r6p^Lr@=W;*D?Jp2-CO5F?XJiPM@XgSGv3fKh4NksGa^g1mqg zi@#mP{d>Co%yiZ&;;V7o{refr`Y^!rx04uM&%>3z{I$wt2dkm0MS_a0+ndFb6VaIq z2RmbDcMf?FI9&aysLrB>2`}a2$-32G*|SnzpZ=3+TIr)Q_q=#IS%=U=wwM!}_qXH{ z+r?V&PU6dL8&@mlUq983vUtC>WqFCM<0LHemc~2(IOs^rwoK#VS-t6 z$d7%ZQ3s`K$z2!Ip7+`@kinBo);SEvftMb+YcN`V^~%m=#j>MGRL&mxc=M1EaxJof z66F@9;7otfe)o>jU~sPnXBiEsx?>g^UFzCo8MsMEi1-M@fCIjnn|)fubMaqUCP$Rg z*WY}L#!;@P&$dmC2D8?A2N4U}qKd+fyRh}ut8fc24>HG!wOMJx;ZFWlwWGwU-8WG3 zSv+JfHcm9OD#%$RR_qAo@E|QoEOI%YRqW9OfiX3SdTPwF_q)u@#!1fucbAtED4dR( zlK^e2h;%6BW4^#D_9>L*s*3ZjoPf4Mam!M~ZG2ZXUqYW66^pecp^Wvbbm%1ymVgz) zhei(Pez9l3^RFZMBz`dOA-F+M=w>pTP&)pwcTnrdmYVJlI#hULf+t69BAyCE(C?uy zCc?1vFNVx>na>4Sj@-{1&SeQr2a%&`^qSsVU6q_ao)p7jZNq(P19D!qK0_j3H_!JJ zw}^}0B;lt*Aj>nv8Qp!Qt&~WxK9UeYHI|;Ru9rWdg)W|^lf)b3QomV~o(|;Qt&g(+ zR@$7%mAVd@+tFmHwjH3*>d({LCNAXJl%Agt4p2$4tTFOIz1mH;4?3}|+UG9HGiiQG z9BzF(!cWUIsM--6HEn=oi1s^a&vr>EpWtrS8m-3|sw`!k4DE?pEq9%Elsb&PC+Yd| zIE~pt6Dndh0^WF07l2rHyMZPd5rprR2upu{u1Ph_c_l8=&$n$-BGkWOd?{t@yCdZc zt9UD#$XgH@J5Jj~W&mdxce*@xWEC5B4~Y(R1pO4`aqNc~ktgQOlM#mH0s&M(Xp
Llu=?^n(K09xQgkx$t5);sXf6{ov`Msq$C|O+3h`{ECL@%R>MT`K#mG3&n9*kN z@Mo4^m;7$8lGI*I(rM57FW+eRX6#DTLwKRR{ga8T8EOyQ*3aikf$bvKkOc5)Dy~hd zHzRF=&;~OIyc`IPB-Y&#YBy-axDakGWaSy(K2eFB!Cmv}FQ+xaSws&F0kX{vy@Fh=-oDqnw1rb4gm&vUya4!pbP?GMu zOfd@6j4R*?RwK;|mVP5#EMbL0bp zT{om!Mbbw9^4y?Iw8~$sXlBXla_lc&xoHR^AvOM^>l8{;$-q&Rp;q`#-13&llRr4W zNS`Er8MnUmSjuqCQ;LaEMNEI2pw_AkOd&Ev6oavm3NP@=0%}ahUa?SN)HxrBgQyOf z2zE3!!-S^1=Dt=_mN$IkGR+q*RLamF2CHH>{B17yx6N(AaE#s0FuaCP`;bdLapYQo zO@m=-PO{$p$FO?cul`r+kSI{LwN^IB$&Za`l>(o&^K9QqA#Wdy1Kk$>^uGTA62CEx zCYzKEVwvDp!TN8X;Eg|rB>stwNqS)PBW39*=1?&9`s^Dx6|b|4Hr4Eq-!{#n>nGj` z7x{#X43gLCEah5k`chctf@-zrljaVzivfFmFa#Q8rS$UKCVdLz z_V3hk{ym|yX_C5&N|u)q-=YCe4*MSYFH3HqP=kbWt7Nu!z6yPGgtfY3$vb}W3H z$dx3X==+32u;@{c3D{%E9ELE=9Tu0%yIx%=UX;Cb$P{{3!PlKQEQ1mM0qIr3L5|Pi zqnzCff~^BZP9>JzAFn@po1=Xw(Hc07O>U}s2r4S@_w7LKkxOj3QXq(9pJi(T)9r5c z(bH$s8aP$TbntLXeAzxy3o%q5r2PH#RF$Z;n=^Wz-OM7fumoC}nm2=_D(8N&)-`d`6eQG~!?$%8kR#L&Pz@RCfa+$BLU8DoYiz9W{FGA6a^hr_w{YdwDTSYN z%-%n+a061~GdMuIZs76gU@m;C%|t&$@)c(7Z4iV|RKyt3kX{Y5PKhnrOzz(wiW>i3 zMr&9s4Bq2hdcN8!P&f)c2Is#VR4^*2{*+~Iq`@?=S1McZJt3FfmfIjB! z?I0(qs-V7QjIE8xh+T7KZzA$qhkavnxZ7bJ`m3UL>g!ENK1m_)(@gp7@_u@ZHW%^T z6lu7mtoWPk0UGvu?1zOGJ1){LBYatEB4Q7=KF=aX!ol7rAb~@X*?xE5Bad-98*>zj zeV^J<4>9g|<}4F3wD^?%Wg+b#z&%}QK DkrwUH diff --git a/docs/openapi.json b/docs/openapi.json deleted file mode 100644 index 306ba28..0000000 --- a/docs/openapi.json +++ /dev/null @@ -1,989 +0,0 @@ -{ - "openapi": "3.1.0", - "info": { - "title": "PrivateGPT", - "summary": "PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point.", - "description": "## Introduction\n\nPrivateGPT provides an **API** containing all the building blocks required to build\n**private, context-aware AI applications**. The API follows and extends OpenAI API standard, and supports\nboth normal and streaming responses.\n\nThe API is divided in two logical blocks:\n\n- High-level API, abstracting all the complexity of a RAG (Retrieval Augmented Generation) pipeline implementation:\n - Ingestion of documents: internally managing document parsing, splitting, metadata extraction,\n embedding generation and storage.\n - Chat & Completions using context from ingested documents: abstracting the retrieval of context, the prompt\n engineering and the response generation.\n- Low-level API, allowing advanced users to implement their own complex pipelines:\n - Embeddings generation: based on a piece of text.\n - Contextual chunks retrieval: given a query, returns the most relevant chunks of text from the ingested\n documents.\n\n> A working **Gradio UI client** is provided to test the API, together with a set of\n> useful tools such as bulk model download script, ingestion script, documents folder\n> watch, etc.\n\n## Quick Local Installation steps\n\nThe steps in `Installation and Settings` section are better explained and cover more\nsetup scenarios. But if you are looking for a quick setup guide, here it is:\n\n```\n# Clone the repo\ngit clone https://github.com/imartinez/privateGPT\ncd privateGPT\n\n# Install Python 3.11\npyenv install 3.11\npyenv local 3.11\n\n# Install dependencies\npoetry install --with ui,local\n\n# Download Embedding and LLM models\npoetry run python scripts/setup\n\n# (Optional) For Mac with Metal GPU, enable it. 
Check Installation and Settings section \nto know how to enable GPU on other platforms\nCMAKE_ARGS=\"-DLLAMA_METAL=on\" pip install --force-reinstall --no-cache-dir llama-cpp-python\n\n# Run the local server \nPGPT_PROFILES=local make run\n\n# Note: on Mac with Metal you should see a ggml_metal_add_buffer log, stating GPU is \nbeing used\n\n# Navigate to the UI and try it out! \nhttp://localhost:8001/\n```\n\n## Installation and Settings\n\n### Base requirements to run PrivateGPT\n\n* Git clone PrivateGPT repository, and navigate to it:\n\n```\n git clone https://github.com/imartinez/privateGPT\n cd privateGPT\n```\n\n* Install Python 3.11. Ideally through a python version manager like `pyenv`.\n Python 3.12\n should work too. Earlier python versions are not supported.\n * osx/linux: [pyenv](https://github.com/pyenv/pyenv)\n * windows: [pyenv-win](https://github.com/pyenv-win/pyenv-win)\n\n``` \npyenv install 3.11\npyenv local 3.11\n```\n\n* Install [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) for dependency management:\n\n* Have a valid C++ compiler like gcc. See [Troubleshooting: C++ Compiler](#troubleshooting-c-compiler) for more details.\n\n* Install `make` for scripts:\n * osx: (Using homebrew): `brew install make`\n * windows: (Using chocolatey) `choco install make`\n\n### Install dependencies\n\nInstall the dependencies:\n\n```bash\npoetry install --with ui\n```\n\nVerify everything is working by running `make run` (or `poetry run python -m private_gpt`) and navigate to\nhttp://localhost:8001. You should see a [Gradio UI](https://gradio.app/) **configured with a mock LLM** that will\necho back the input. Later we'll see how to configure a real LLM.\n\n### Settings\n\n> Note: the default settings of PrivateGPT work out-of-the-box for a 100% local setup. Skip this section if you just\n> want to test PrivateGPT locally, and come back later to learn about more configuration options.\n\nPrivateGPT is configured through *profiles* that are defined using yaml files, and selected through env variables.\nThe full list of properties configurable can be found in `settings.yaml`\n\n#### env var `PGPT_SETTINGS_FOLDER`\n\nThe location of the settings folder. 
Defaults to the root of the project.\nShould contain the default `settings.yaml` and any other `settings-{profile}.yaml`.\n\n#### env var `PGPT_PROFILES`\n\nBy default, the profile definition in `settings.yaml` is loaded.\nUsing this env var you can load additional profiles; format is a comma separated list of profile names.\nThis will merge `settings-{profile}.yaml` on top of the base settings file.\n\nFor example:\n`PGPT_PROFILES=local,cuda` will load `settings-local.yaml`\nand `settings-cuda.yaml`, their contents will be merged with\nlater profiles properties overriding values of earlier ones like `settings.yaml`.\n\nDuring testing, the `test` profile will be active along with the default, therefore `settings-test.yaml`\nfile is required.\n\n#### Environment variables expansion\n\nConfiguration files can contain environment variables,\nthey will be expanded at runtime.\n\nExpansion must follow the pattern `${VARIABLE_NAME:default_value}`.\n\nFor example, the following configuration will use the value of the `PORT`\nenvironment variable or `8001` if it's not set.\nMissing variables with no default will produce an error.\n\n```yaml\nserver:\n port: ${PORT:8001}\n```\n\n### Local LLM requirements\n\nInstall extra dependencies for local execution:\n\n```bash\npoetry install --with local\n```\n\nFor PrivateGPT to run fully locally GPU acceleration is required\n(CPU execution is possible, but very slow), however,\ntypical Macbook laptops or window desktops with mid-range GPUs lack VRAM to run\neven the smallest LLMs. For that reason\n**local execution is only supported for models compatible with [llama.cpp](https://github.com/ggerganov/llama.cpp)**\n\nThese two models are known to work well:\n\n* https://huggingface.co/TheBloke/Llama-2-7B-chat-GGUF\n* https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF (recommended)\n\nTo ease the installation process, use the `setup` script that will download both\nthe embedding and the LLM model and place them in the correct location (under `models` folder):\n\n```bash\npoetry run python scripts/setup\n```\n\nIf you are ok with CPU execution, you can skip the rest of this section.\n\nAs stated before, llama.cpp is required and in\nparticular [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)\nis used.\n\n> It's highly encouraged that you fully read llama-cpp and llama-cpp-python documentation relevant to your platform.\n> Running into installation issues is very likely, and you'll need to troubleshoot them yourself.\n\n#### Customizing low level parameters\n\nCurrently not all the parameters of llama-cpp and llama-cpp-python are available at PrivateGPT's `settings.yaml` file. In case you need to customize parameters such as the number of layers loaded into the GPU, you might change these at the `llm_component.py` file under the `private_gpt/components/llm/llm_component.py`. If you are getting an out of memory error, you might also try a smaller model or stick to the proposed recommended models, instead of custom tuning the parameters.\n\n#### OSX GPU support\n\nYou will need to build [llama.cpp](https://github.com/ggerganov/llama.cpp) with\nmetal support. 
To do that run:\n\n```bash\nCMAKE_ARGS=\"-DLLAMA_METAL=on\" pip install --force-reinstall --no-cache-dir llama-cpp-python\n```\n\n#### Windows NVIDIA GPU support\n\nWindows GPU support is done through CUDA.\nFollow the instructions on the original [llama.cpp](https://github.com/ggerganov/llama.cpp) repo to install the required\ndependencies.\n\nSome tips to get it working with an NVIDIA card and CUDA (Tested on Windows 10 with CUDA 11.5 RTX 3070):\n\n* Install latest VS2022 (and build tools) https://visualstudio.microsoft.com/vs/community/\n* Install CUDA toolkit https://developer.nvidia.com/cuda-downloads\n* Verify your installation is correct by running `nvcc --version` and `nvidia-smi`, ensure your CUDA version is up to\n date and your GPU is detected.\n* [Optional] Install CMake to troubleshoot building issues by compiling llama.cpp directly https://cmake.org/download/\n\nIf you have all required dependencies properly configured running the\nfollowing powershell command should succeed.\n\n```powershell\n$env:CMAKE_ARGS='-DLLAMA_CUBLAS=on'; poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python\n```\n\nIf your installation was correct, you should see a message similar to the following next\ntime you start the server `BLAS = 1`.\n\n```\nllama_new_context_with_model: total VRAM used: 4857.93 MB (model: 4095.05 MB, context: 762.87 MB)\nAVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 0 | VSX = 0 | \n```\n\nNote that llama.cpp offloads matrix calculations to the GPU but the performance is\nstill hit heavily due to latency between CPU and GPU communication. You might need to tweak\nbatch sizes and other parameters to get the best performance for your particular system.\n\n#### Linux NVIDIA GPU support and Windows-WSL\n\nLinux GPU support is done through CUDA.\nFollow the instructions on the original [llama.cpp](https://github.com/ggerganov/llama.cpp) repo to install the required\nexternal\ndependencies.\n\nSome tips:\n\n* Make sure you have an up-to-date C++ compiler\n* Install CUDA toolkit https://developer.nvidia.com/cuda-downloads\n* Verify your installation is correct by running `nvcc --version` and `nvidia-smi`, ensure your CUDA version is up to\n date and your GPU is detected.\n\nAfter that running the following command in the repository will install llama.cpp with GPU support:\n\n`\nCMAKE_ARGS='-DLLAMA_CUBLAS=on' poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python\n`\n\nIf your installation was correct, you should see a message similar to the following next\ntime you start the server `BLAS = 1`.\n\n```\nllama_new_context_with_model: total VRAM used: 4857.93 MB (model: 4095.05 MB, context: 762.87 MB)\nAVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 0 | VSX = 0 | \n```\n\n#### Vectorstores\nPrivateGPT supports [Chroma](https://www.trychroma.com/), [Qdrant](https://qdrant.tech/) as vectorstore providers. 
Chroma is the default.\n\nTo enable Qdrant, set the `vectorstore.database` property in the `settings.yaml` file to `qdrant` and install the `qdrant` extra.\n\n```bash\npoetry install --extras qdrant\n```\n\nBy default Qdrant tries to connect to an instance at `http://localhost:3000`.\n\nQdrant settings can be configured by setting values to the `qdrant` property in the `settings.yaml` file.\n\nThe available configuration options are:\n| Field | Description |\n|--------------|-------------|\n| location | If `:memory:` - use in-memory Qdrant instance. If `str` - use it as a `url` parameter.|\n| url | Either host or str of 'Optional[scheme], host, Optional[port], Optional[prefix]'. Eg. `http://localhost:6333` |\n| port | Port of the REST API interface. Default: `6333` |\n| grpc_port | Port of the gRPC interface. Default: `6334` |\n| prefer_grpc | If `true` - use gRPC interface whenever possible in custom methods. |\n| https | If `true` - use HTTPS(SSL) protocol.|\n| api_key | API key for authentication in Qdrant Cloud.|\n| prefix | If set, add `prefix` to the REST URL path. Example: `service/v1` will result in `http://localhost:6333/service/v1/{qdrant-endpoint}` for REST API.|\n| timeout | Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC |\n| host | Host name of Qdrant service. If url and host are not set, defaults to 'localhost'.|\n| path | Persistence path for QdrantLocal. Eg. `local_data/private_gpt/qdrant`|\n| force_disable_check_same_thread | Force disable check_same_thread for QdrantLocal sqlite connection.|\n\n#### Known issues and Troubleshooting\n\nExecution of LLMs locally still has a lot of sharp edges, specially when running on non Linux platforms.\nYou might encounter several issues:\n\n* Performance: RAM or VRAM usage is very high, your computer might experience slowdowns or even crashes.\n* GPU Virtualization on Windows and OSX: Simply not possible with docker desktop, you have to run the server directly on\n the host.\n* Building errors: Some of PrivateGPT dependencies need to build native code, and they might fail on some platforms.\n Most likely you are missing some dev tools in your machine (updated C++ compiler, CUDA is not on PATH, etc.).\n If you encounter any of these issues, please open an issue and we'll try to help.\n\n#### Troubleshooting: C++ Compiler\n\nIf you encounter an error while building a wheel during the `pip install` process, you may need to install a C++\ncompiler on your computer.\n\n**For Windows 10/11**\n\nTo install a C++ compiler on Windows 10/11, follow these steps:\n\n1. Install Visual Studio 2022.\n2. Make sure the following components are selected:\n * Universal Windows Platform development\n * C++ CMake tools for Windows\n3. Download the MinGW installer from the [MinGW website](https://sourceforge.net/projects/mingw/).\n4. Run the installer and select the `gcc` component.\n\n** For OSX **\n\n1. Check if you have a C++ compiler installed, Xcode might have done it for you. for example running `gcc`.\n2. If not, you can install clang or gcc with homebrew `brew install gcc`\n\n#### Troubleshooting: Mac Running Intel\n\nWhen running a Mac with Intel hardware (not M1), you may run into _clang: error: the clang compiler does not support '\n-march=native'_ during pip install.\n\nIf so set your archflags during pip install. eg: _ARCHFLAGS=\"-arch x86_64\" pip3 install -r requirements.txt_\n\n## Running the Server\n\nAfter following the installation steps you should be ready to go. Here are some common run setups:\n\n### Running 100% locally\n\nMake sure you have followed the *Local LLM requirements* section before moving on.\n\nThis command will start PrivateGPT using the `settings.yaml` (default profile) together with the `settings-local.yaml`\nconfiguration files. By default, it will enable both the API and the Gradio UI. 
Run:\n\n```\nPGPT_PROFILES=local make run\n``` \n\nor\n\n```\nPGPT_PROFILES=local poetry run python -m private_gpt\n```\n\nWhen the server is started it will print a log *Application startup complete*.\nNavigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API\nusing Swagger UI.\n\n### Local server using OpenAI as LLM\n\nIf you cannot run a local model (because you don't have a GPU, for example) or for testing purposes, you may\ndecide to run PrivateGPT using OpenAI as the LLM.\n\nIn order to do so, create a profile `settings-openai.yaml` with the following contents:\n\n```yaml\nllm:\n mode: openai\n\nopenai:\n api_key: # You could skip this configuration and use the OPENAI_API_KEY env var instead\n```\n\nAnd run PrivateGPT loading that profile you just created:\n\n```PGPT_PROFILES=openai make run```\n\nor\n\n```PGPT_PROFILES=openai poetry run python -m private_gpt```\n\n> Note this will still use the local Embeddings model, as it is ok to use it on a CPU.\n> We'll support using OpenAI embeddings in a future release.\n\nWhen the server is started it will print a log *Application startup complete*.\nNavigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API.\nYou'll notice the speed and quality of response is higher, given you are using OpenAI's servers for the heavy\ncomputations.\n\n### Use AWS's Sagemaker\n\n\ud83d\udea7 Under construction \ud83d\udea7\n\n## Gradio UI user manual\n\nGradio UI is a ready to use way of testing most of PrivateGPT API functionalities.\n\n![Gradio PrivateGPT](https://lh3.googleusercontent.com/drive-viewer/AK7aPaD_Hc-A8A9ooMe-hPgm_eImgsbxAjb__8nFYj8b_WwzvL1Gy90oAnp1DfhPaN6yGiEHCOXs0r77W1bYHtPzlVwbV7fMsA=s1600)\n\n### Execution Modes\n\nIt has 3 modes of execution (you can select in the top-left):\n\n* Query Docs: uses the context from the\n ingested documents to answer the questions posted in the chat. It also takes\n into account previous chat messages as context.\n * Makes use of `/chat/completions` API with `use_context=true` and no\n `context_filter`.\n* Search in Docs: fast search that returns the 4 most related text\n chunks, together with their source document and page.\n * Makes use of `/chunks` API with no `context_filter`, `limit=4` and\n `prev_next_chunks=0`.\n* LLM Chat: simple, non-contextual chat with the LLM. The ingested documents won't\n be taken into account, only the previous messages.\n * Makes use of `/chat/completions` API with `use_context=false`.\n\n### Document Ingestion\n\nIngest documents by using the `Upload a File` button. You can check the progress of\nthe ingestion in the console logs of the server.\n\nThe list of ingested files is shown below the button.\n\nIf you want to delete the ingested documents, refer to *Reset Local documents\ndatabase* section in the documentation.\n\n### Chat\n\nNormal chat interface, self-explanatory ;)\n\nYou can check the actual prompt being passed to the LLM by looking at the logs of\nthe server. We'll add better observability in future releases.\n\n## Deployment options\n\n\ud83d\udea7 We are working on Dockerized deployment guidelines \ud83d\udea7\n\n## Observability\n\nBasic logs are enabled using LlamaIndex\nbasic logging (for example ingestion progress or LLM prompts and answers).\n\n\ud83d\udea7 We are working on improved Observability. \ud83d\udea7\n\n## Ingesting & Managing Documents\n\n\ud83d\udea7 Document Update and Delete are still WIP. 
\ud83d\udea7\n\nThe ingestion of documents can be done in different ways:\n\n* Using the `/ingest` API\n* Using the Gradio UI\n* Using the Bulk Local Ingestion functionality (check next section)\n\n### Bulk Local Ingestion\n\nWhen you are running PrivateGPT in a fully local setup, you can ingest a complete folder for convenience (containing\npdf, text files, etc.)\nand optionally watch changes on it with the command:\n\n```bash\nmake ingest /path/to/folder -- --watch\n```\n\nTo log the processed and failed files to an additional file, use:\n\n```bash\nmake ingest /path/to/folder -- --watch --log-file /path/to/log/file.log\n```\n\nAfter ingestion is complete, you should be able to chat with your documents\nby navigating to http://localhost:8001 and using the option `Query documents`,\nor using the completions / chat API.\n\n### Reset Local documents database\n\nWhen running in a local setup, you can remove all ingested documents by simply\ndeleting all contents of `local_data` folder (except .gitignore).\n\n## API\n\nAs explained in the introduction, the API contains high level APIs (ingestion and chat/completions) and low level APIs\n(embeddings and chunk retrieval). In this section the different specific API calls are explained.\n", - "contact": { - "url": "https://github.com/imartinez/privateGPT" - }, - "license": { - "name": "Apache 2.0", - "url": "https://www.apache.org/licenses/LICENSE-2.0.html" - }, - "version": "0.1.0", - "x-logo": { - "url": "https://lh3.googleusercontent.com/drive-viewer/AK7aPaD_iNlMoTquOBsw4boh4tIYxyEuhz6EtEs8nzq3yNkNAK00xGjE1KUCmPJSk3TYOjcs6tReG6w_cLu1S7L_gPgT9z52iw=s2560" - } - }, - "paths": { - "/v1/completions": { - "post": { - "tags": [ - "Contextual Completions" - ], - "summary": "Completion", - "description": "We recommend most users use our Chat completions API.\n\nGiven a prompt, the model will return one predicted completion. If `use_context`\nis set to `true`, the model will use context coming from the ingested documents\nto create the response. The documents being used can be filtered using the\n`context_filter` and passing the document IDs to be used. Ingested documents IDs\ncan be found using `/ingest/list` endpoint. 
If you want all ingested documents to\nbe used, remove `context_filter` altogether.\n\nWhen using `'include_sources': true`, the API will return the source Chunks used\nto create the response, which come from the context provided.\n\nWhen using `'stream': true`, the API will return data chunks following [OpenAI's\nstreaming model](https://platform.openai.com/docs/api-reference/chat/streaming):\n```\n{\"id\":\"12345\",\"object\":\"completion.chunk\",\"created\":1694268190,\n\"model\":\"private-gpt\",\"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\n\"finish_reason\":null}]}\n```", - "operationId": "prompt_completion_v1_completions_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CompletionsBody" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenAICompletion" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/chat/completions": { - "post": { - "tags": [ - "Contextual Completions" - ], - "summary": "Chat Completion", - "description": "Given a list of messages comprising a conversation, return a response.\n\nIf `use_context` is set to `true`, the model will use context coming\nfrom the ingested documents to create the response. The documents being used can\nbe filtered using the `context_filter` and passing the document IDs to be used.\nIngested documents IDs can be found using `/ingest/list` endpoint. If you want\nall ingested documents to be used, remove `context_filter` altogether.\n\nWhen using `'include_sources': true`, the API will return the source Chunks used\nto create the response, which come from the context provided.\n\nWhen using `'stream': true`, the API will return data chunks following [OpenAI's\nstreaming model](https://platform.openai.com/docs/api-reference/chat/streaming):\n```\n{\"id\":\"12345\",\"object\":\"completion.chunk\",\"created\":1694268190,\n\"model\":\"private-gpt\",\"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\n\"finish_reason\":null}]}\n```", - "operationId": "chat_completion_v1_chat_completions_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ChatBody" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenAICompletion" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/chunks": { - "post": { - "tags": [ - "Context Chunks" - ], - "summary": "Chunks Retrieval", - "description": "Given a `text`, returns the most relevant chunks from the ingested documents.\n\nThe returned information can be used to generate prompts that can be\npassed to `/completions` or `/chat/completions` APIs. Note: it is usually a very\nfast API, because only the Embeddings model is involved, not the LLM. The\nreturned information contains the relevant chunk `text` together with the source\n`document` it is coming from. 
It also contains a score that can be used to\ncompare different results.\n\nThe max number of chunks to be returned is set using the `limit` param.\n\nPrevious and next chunks (pieces of text that appear right before or after in the\ndocument) can be fetched by using the `prev_next_chunks` field.\n\nThe documents being used can be filtered using the `context_filter` and passing\nthe document IDs to be used. Ingested documents IDs can be found using\n`/ingest/list` endpoint. If you want all ingested documents to be used,\nremove `context_filter` altogether.", - "operationId": "chunks_retrieval_v1_chunks_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ChunksBody" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ChunksResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/ingest": { - "post": { - "tags": [ - "Ingestion" - ], - "summary": "Ingest", - "description": "Ingests and processes a file, storing its chunks to be used as context.\n\nThe context obtained from files is later used in\n`/chat/completions`, `/completions`, and `/chunks` APIs.\n\nMost common document\nformats are supported, but you may be prompted to install an extra dependency to\nmanage a specific file type.\n\nA file can generate different Documents (for example a PDF generates one Document\nper page). All Documents IDs are returned in the response, together with the\nextracted Metadata (which is later used to improve context retrieval). 
Those IDs\ncan be used to filter the context used to create responses in\n`/chat/completions`, `/completions`, and `/chunks` APIs.", - "operationId": "ingest_v1_ingest_post", - "requestBody": { - "content": { - "multipart/form-data": { - "schema": { - "$ref": "#/components/schemas/Body_ingest_v1_ingest_post" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/IngestResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/ingest/list": { - "get": { - "tags": [ - "Ingestion" - ], - "summary": "List Ingested", - "description": "Lists already ingested Documents including their Document ID and metadata.\n\nThose IDs can be used to filter the context used to create responses\nin `/chat/completions`, `/completions`, and `/chunks` APIs.", - "operationId": "list_ingested_v1_ingest_list_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/IngestResponse" - } - } - } - } - } - } - }, - "/v1/ingest/{doc_id}": { - "delete": { - "tags": [ - "Ingestion" - ], - "summary": "Delete Ingested", - "description": "Delete the specified ingested Document.\n\nThe `doc_id` can be obtained from the `GET /ingest/list` endpoint.\nThe document will be effectively deleted from your storage context.", - "operationId": "delete_ingested_v1_ingest__doc_id__delete", - "parameters": [ - { - "name": "doc_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Doc Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": {} - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/embeddings": { - "post": { - "tags": [ - "Embeddings" - ], - "summary": "Embeddings Generation", - "description": "Get a vector representation of a given input.\n\nThat vector representation can be easily consumed\nby machine learning models and algorithms.", - "operationId": "embeddings_generation_v1_embeddings_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EmbeddingsBody" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EmbeddingsResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/health": { - "get": { - "tags": [ - "Health" - ], - "summary": "Health", - "description": "Return ok if the system is up.", - "operationId": "health_health_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HealthResponse" - } - } - } - } - } - } - } - }, - "components": { - "schemas": { - "Body_ingest_v1_ingest_post": { - "properties": { - "file": { - "type": "string", - "format": "binary", - "title": "File" - } - }, - "type": "object", - "required": [ - "file" 
- ], - "title": "Body_ingest_v1_ingest_post" - }, - "ChatBody": { - "properties": { - "messages": { - "items": { - "$ref": "#/components/schemas/OpenAIMessage" - }, - "type": "array", - "title": "Messages" - }, - "use_context": { - "type": "boolean", - "title": "Use Context", - "default": false - }, - "context_filter": { - "anyOf": [ - { - "$ref": "#/components/schemas/ContextFilter" - }, - { - "type": "null" - } - ] - }, - "include_sources": { - "type": "boolean", - "title": "Include Sources", - "default": true - }, - "stream": { - "type": "boolean", - "title": "Stream", - "default": false - } - }, - "type": "object", - "required": [ - "messages" - ], - "title": "ChatBody", - "examples": [ - { - "context_filter": { - "docs_ids": [ - "c202d5e6-7b69-4869-81cc-dd574ee8ee11" - ] - }, - "include_sources": true, - "messages": [ - { - "content": "How do you fry an egg?", - "role": "user" - } - ], - "stream": false, - "use_context": true - } - ] - }, - "Chunk": { - "properties": { - "object": { - "const": "context.chunk", - "title": "Object" - }, - "score": { - "type": "number", - "title": "Score", - "examples": [ - 0.023 - ] - }, - "document": { - "$ref": "#/components/schemas/IngestedDoc" - }, - "text": { - "type": "string", - "title": "Text", - "examples": [ - "Outbound sales increased 20%, driven by new leads." - ] - }, - "previous_texts": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Previous Texts", - "examples": [ - [ - "SALES REPORT 2023", - "Inbound didn't show major changes." - ] - ] - }, - "next_texts": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Next Texts", - "examples": [ - [ - "New leads came from Google Ads campaign.", - "The campaign was run by the Marketing Department" - ] - ] - } - }, - "type": "object", - "required": [ - "object", - "score", - "document", - "text" - ], - "title": "Chunk" - }, - "ChunksBody": { - "properties": { - "text": { - "type": "string", - "title": "Text", - "examples": [ - "Q3 2023 sales" - ] - }, - "context_filter": { - "anyOf": [ - { - "$ref": "#/components/schemas/ContextFilter" - }, - { - "type": "null" - } - ] - }, - "limit": { - "type": "integer", - "title": "Limit", - "default": 10 - }, - "prev_next_chunks": { - "type": "integer", - "title": "Prev Next Chunks", - "default": 0, - "examples": [ - 2 - ] - } - }, - "type": "object", - "required": [ - "text" - ], - "title": "ChunksBody" - }, - "ChunksResponse": { - "properties": { - "object": { - "const": "list", - "title": "Object" - }, - "model": { - "const": "private-gpt", - "title": "Model" - }, - "data": { - "items": { - "$ref": "#/components/schemas/Chunk" - }, - "type": "array", - "title": "Data" - } - }, - "type": "object", - "required": [ - "object", - "model", - "data" - ], - "title": "ChunksResponse" - }, - "CompletionsBody": { - "properties": { - "prompt": { - "type": "string", - "title": "Prompt" - }, - "use_context": { - "type": "boolean", - "title": "Use Context", - "default": false - }, - "context_filter": { - "anyOf": [ - { - "$ref": "#/components/schemas/ContextFilter" - }, - { - "type": "null" - } - ] - }, - "include_sources": { - "type": "boolean", - "title": "Include Sources", - "default": true - }, - "stream": { - "type": "boolean", - "title": "Stream", - "default": false - } - }, - "type": "object", - "required": [ - "prompt" - ], - "title": "CompletionsBody", - "examples": [ - { - "include_sources": false, - "prompt": "How do you 
fry an egg?", - "stream": false, - "use_context": false - } - ] - }, - "ContextFilter": { - "properties": { - "docs_ids": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Docs Ids", - "examples": [ - [ - "c202d5e6-7b69-4869-81cc-dd574ee8ee11" - ] - ] - } - }, - "type": "object", - "required": [ - "docs_ids" - ], - "title": "ContextFilter" - }, - "Embedding": { - "properties": { - "index": { - "type": "integer", - "title": "Index" - }, - "object": { - "const": "embedding", - "title": "Object" - }, - "embedding": { - "items": { - "type": "number" - }, - "type": "array", - "title": "Embedding", - "examples": [ - [ - 0.0023064255, - -0.009327292 - ] - ] - } - }, - "type": "object", - "required": [ - "index", - "object", - "embedding" - ], - "title": "Embedding" - }, - "EmbeddingsBody": { - "properties": { - "input": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - } - ], - "title": "Input" - } - }, - "type": "object", - "required": [ - "input" - ], - "title": "EmbeddingsBody" - }, - "EmbeddingsResponse": { - "properties": { - "object": { - "const": "list", - "title": "Object" - }, - "model": { - "const": "private-gpt", - "title": "Model" - }, - "data": { - "items": { - "$ref": "#/components/schemas/Embedding" - }, - "type": "array", - "title": "Data" - } - }, - "type": "object", - "required": [ - "object", - "model", - "data" - ], - "title": "EmbeddingsResponse" - }, - "HTTPValidationError": { - "properties": { - "detail": { - "items": { - "$ref": "#/components/schemas/ValidationError" - }, - "type": "array", - "title": "Detail" - } - }, - "type": "object", - "title": "HTTPValidationError" - }, - "HealthResponse": { - "properties": { - "status": { - "const": "ok", - "title": "Status", - "default": "ok" - } - }, - "type": "object", - "title": "HealthResponse" - }, - "IngestResponse": { - "properties": { - "object": { - "const": "list", - "title": "Object" - }, - "model": { - "const": "private-gpt", - "title": "Model" - }, - "data": { - "items": { - "$ref": "#/components/schemas/IngestedDoc" - }, - "type": "array", - "title": "Data" - } - }, - "type": "object", - "required": [ - "object", - "model", - "data" - ], - "title": "IngestResponse" - }, - "IngestedDoc": { - "properties": { - "object": { - "const": "ingest.document", - "title": "Object" - }, - "doc_id": { - "type": "string", - "title": "Doc Id", - "examples": [ - "c202d5e6-7b69-4869-81cc-dd574ee8ee11" - ] - }, - "doc_metadata": { - "anyOf": [ - { - "type": "object" - }, - { - "type": "null" - } - ], - "title": "Doc Metadata", - "examples": [ - { - "file_name": "Sales Report Q3 2023.pdf", - "page_label": "2" - } - ] - } - }, - "type": "object", - "required": [ - "object", - "doc_id", - "doc_metadata" - ], - "title": "IngestedDoc" - }, - "OpenAIChoice": { - "properties": { - "finish_reason": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Finish Reason", - "examples": [ - "stop" - ] - }, - "delta": { - "anyOf": [ - { - "$ref": "#/components/schemas/OpenAIDelta" - }, - { - "type": "null" - } - ] - }, - "message": { - "anyOf": [ - { - "$ref": "#/components/schemas/OpenAIMessage" - }, - { - "type": "null" - } - ] - }, - "sources": { - "anyOf": [ - { - "items": { - "$ref": "#/components/schemas/Chunk" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Sources" - }, - "index": { - "type": "integer", - "title": "Index", - "default": 0 - } - }, - "type": "object", - 
"required": [ - "finish_reason" - ], - "title": "OpenAIChoice", - "description": "Response from AI.\n\nEither the delta or the message will be present, but never both.\nSources used will be returned in case context retrieval was enabled." - }, - "OpenAICompletion": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "object": { - "type": "string", - "enum": [ - "completion", - "completion.chunk" - ], - "title": "Object", - "default": "completion" - }, - "created": { - "type": "integer", - "title": "Created", - "examples": [ - 1623340000 - ] - }, - "model": { - "const": "private-gpt", - "title": "Model" - }, - "choices": { - "items": { - "$ref": "#/components/schemas/OpenAIChoice" - }, - "type": "array", - "title": "Choices" - } - }, - "type": "object", - "required": [ - "id", - "created", - "model", - "choices" - ], - "title": "OpenAICompletion", - "description": "Clone of OpenAI Completion model.\n\nFor more information see: https://platform.openai.com/docs/api-reference/chat/object" - }, - "OpenAIDelta": { - "properties": { - "content": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Content" - } - }, - "type": "object", - "required": [ - "content" - ], - "title": "OpenAIDelta", - "description": "A piece of completion that needs to be concatenated to get the full message." - }, - "OpenAIMessage": { - "properties": { - "role": { - "type": "string", - "enum": [ - "assistant", - "system", - "user" - ], - "title": "Role", - "default": "user" - }, - "content": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Content" - } - }, - "type": "object", - "required": [ - "content" - ], - "title": "OpenAIMessage", - "description": "Inference result, with the source of the message.\n\nRole could be the assistant or system\n(providing a default response, not AI generated)." - }, - "ValidationError": { - "properties": { - "loc": { - "items": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - } - ] - }, - "type": "array", - "title": "Location" - }, - "msg": { - "type": "string", - "title": "Message" - }, - "type": { - "type": "string", - "title": "Error Type" - } - }, - "type": "object", - "required": [ - "loc", - "msg", - "type" - ], - "title": "ValidationError" - } - } - }, - "tags": [ - { - "name": "Ingestion", - "description": "High-level APIs covering document ingestion -internally managing document parsing, splitting,metadata extraction, embedding generation and storage- and ingested documents CRUD.Each ingested document is identified by an ID that can be used to filter the contextused in *Contextual Completions* and *Context Chunks* APIs." - }, - { - "name": "Contextual Completions", - "description": "High-level APIs covering contextual Chat and Completions. They follow OpenAI's format, extending it to allow using the context coming from ingested documents to create the response. Internallymanage context retrieval, prompt engineering and the response generation." - }, - { - "name": "Context Chunks", - "description": "Low-level API that given a query return relevant chunks of text coming from the ingesteddocuments." - }, - { - "name": "Embeddings", - "description": "Low-level API to obtain the vector representation of a given text, using an Embeddings model.Follows OpenAI's embeddings API format." - }, - { - "name": "Health", - "description": "Simple health API to make sure the server is up and running." 
- } - ] -} \ No newline at end of file diff --git a/fern/openapi/openapi.json b/fern/openapi/openapi.json index 06fffa6..0fa73d5 100644 --- a/fern/openapi/openapi.json +++ b/fern/openapi/openapi.json @@ -1,20 +1,8 @@ { "openapi": "3.1.0", "info": { - "title": "PrivateGPT", - "summary": "PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point.", - "description": "", - "contact": { - "url": "https://github.com/imartinez/privateGPT" - }, - "license": { - "name": "Apache 2.0", - "url": "https://www.apache.org/licenses/LICENSE-2.0.html" - }, - "version": "0.1.0", - "x-logo": { - "url": "https://lh3.googleusercontent.com/drive-viewer/AK7aPaD_iNlMoTquOBsw4boh4tIYxyEuhz6EtEs8nzq3yNkNAK00xGjE1KUCmPJSk3TYOjcs6tReG6w_cLu1S7L_gPgT9z52iw=s2560" - } + "title": "FastAPI", + "version": "0.1.0" }, "paths": { "/v1/completions": { @@ -65,7 +53,7 @@ "Contextual Completions" ], "summary": "Chat Completion", - "description": "Given a list of messages comprising a conversation, return a response.\n\nOptionally include a `system_prompt` to influence the way the LLM answers.\n\nIf `use_context` is set to `true`, the model will use context coming\nfrom the ingested documents to create the response. The documents being used can\nbe filtered using the `context_filter` and passing the document IDs to be used.\nIngested documents IDs can be found using `/ingest/list` endpoint. If you want\nall ingested documents to be used, remove `context_filter` altogether.\n\nWhen using `'include_sources': true`, the API will return the source Chunks used\nto create the response, which come from the context provided.\n\nWhen using `'stream': true`, the API will return data chunks following [OpenAI's\nstreaming model](https://platform.openai.com/docs/api-reference/chat/streaming):\n```\n{\"id\":\"12345\",\"object\":\"completion.chunk\",\"created\":1694268190,\n\"model\":\"private-gpt\",\"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\n\"finish_reason\":null}]}\n```", + "description": "Given a list of messages comprising a conversation, return a response.\n\nOptionally include an initial `role: system` message to influence the way\nthe LLM answers.\n\nIf `use_context` is set to `true`, the model will use context coming\nfrom the ingested documents to create the response. The documents being used can\nbe filtered using the `context_filter` and passing the document IDs to be used.\nIngested documents IDs can be found using `/ingest/list` endpoint. 
If you want\nall ingested documents to be used, remove `context_filter` altogether.\n\nWhen using `'include_sources': true`, the API will return the source Chunks used\nto create the response, which come from the context provided.\n\nWhen using `'stream': true`, the API will return data chunks following [OpenAI's\nstreaming model](https://platform.openai.com/docs/api-reference/chat/streaming):\n```\n{\"id\":\"12345\",\"object\":\"completion.chunk\",\"created\":1694268190,\n\"model\":\"private-gpt\",\"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\n\"finish_reason\":null}]}\n```", "operationId": "chat_completion_v1_chat_completions_post", "requestBody": { "content": { @@ -338,17 +326,6 @@ "type": "array", "title": "Messages" }, - "system_prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "System Prompt" - }, "use_context": { "type": "boolean", "title": "Use Context", @@ -389,13 +366,16 @@ }, "include_sources": true, "messages": [ + { + "content": "You are a rapper. Always answer with a rap.", + "role": "system" + }, { "content": "How do you fry an egg?", "role": "user" } ], "stream": false, - "system_prompt": "You are a rapper. Always answer with a rap.", "use_context": true } ] @@ -591,6 +571,7 @@ "include_sources": false, "prompt": "How do you fry an egg?", "stream": false, + "system_prompt": "You are a rapper. Always answer with a rap.", "use_context": false } ] @@ -986,27 +967,5 @@ "title": "ValidationError" } } - }, - "tags": [ - { - "name": "Ingestion", - "description": "High-level APIs covering document ingestion -internally managing document parsing, splitting,metadata extraction, embedding generation and storage- and ingested documents CRUD.Each ingested document is identified by an ID that can be used to filter the contextused in *Contextual Completions* and *Context Chunks* APIs." - }, - { - "name": "Contextual Completions", - "description": "High-level APIs covering contextual Chat and Completions. They follow OpenAI's format, extending it to allow using the context coming from ingested documents to create the response. Internallymanage context retrieval, prompt engineering and the response generation." - }, - { - "name": "Context Chunks", - "description": "Low-level API that given a query return relevant chunks of text coming from the ingesteddocuments." - }, - { - "name": "Embeddings", - "description": "Low-level API to obtain the vector representation of a given text, using an Embeddings model.Follows OpenAI's embeddings API format." - }, - { - "name": "Health", - "description": "Simple health API to make sure the server is up and running." 
- } - ] + } } \ No newline at end of file diff --git a/private_gpt/launcher.py b/private_gpt/launcher.py index e65f0ed..791e841 100644 --- a/private_gpt/launcher.py +++ b/private_gpt/launcher.py @@ -1,13 +1,10 @@ """FastAPI app creation, logger configuration and main API routes.""" import logging -from typing import Any from fastapi import Depends, FastAPI, Request from fastapi.middleware.cors import CORSMiddleware -from fastapi.openapi.utils import get_openapi from injector import Injector -from private_gpt.paths import docs_path from private_gpt.server.chat.chat_router import chat_router from private_gpt.server.chunks.chunks_router import chunks_router from private_gpt.server.completions.completions_router import completions_router @@ -22,107 +19,35 @@ logger = logging.getLogger(__name__) def create_app(root_injector: Injector) -> FastAPI: # Start the API - with open(docs_path / "description.md") as description_file: - description = description_file.read() + async def bind_injector_to_request(request: Request) -> None: + request.state.injector = root_injector - tags_metadata = [ - { - "name": "Ingestion", - "description": "High-level APIs covering document ingestion -internally " - "managing document parsing, splitting," - "metadata extraction, embedding generation and storage- and ingested " - "documents CRUD." - "Each ingested document is identified by an ID that can be used to filter the " - "context" - "used in *Contextual Completions* and *Context Chunks* APIs.", - }, - { - "name": "Contextual Completions", - "description": "High-level APIs covering contextual Chat and Completions. They " - "follow OpenAI's format, extending it to " - "allow using the context coming from ingested documents to create the " - "response. Internally" - "manage context retrieval, prompt engineering and the response generation.", - }, - { - "name": "Context Chunks", - "description": "Low-level API that given a query return relevant chunks of " - "text coming from the ingested" - "documents.", - }, - { - "name": "Embeddings", - "description": "Low-level API to obtain the vector representation of a given " - "text, using an Embeddings model." 
- "Follows OpenAI's embeddings API format.", - }, - { - "name": "Health", - "description": "Simple health API to make sure the server is up and running.", - }, - ] + app = FastAPI(dependencies=[Depends(bind_injector_to_request)]) - async def bind_injector_to_request(request: Request) -> None: - request.state.injector = root_injector + app.include_router(completions_router) + app.include_router(chat_router) + app.include_router(chunks_router) + app.include_router(ingest_router) + app.include_router(embeddings_router) + app.include_router(health_router) - app = FastAPI(dependencies=[Depends(bind_injector_to_request)]) + settings = root_injector.get(Settings) + if settings.server.cors.enabled: + logger.debug("Setting up CORS middleware") + app.add_middleware( + CORSMiddleware, + allow_credentials=settings.server.cors.allow_credentials, + allow_origins=settings.server.cors.allow_origins, + allow_origin_regex=settings.server.cors.allow_origin_regex, + allow_methods=settings.server.cors.allow_methods, + allow_headers=settings.server.cors.allow_headers, + ) - def custom_openapi() -> dict[str, Any]: - if app.openapi_schema: - return app.openapi_schema - openapi_schema = get_openapi( - title="PrivateGPT", - description=description, - version="0.1.0", - summary="PrivateGPT is a production-ready AI project that allows you to " - "ask questions to your documents using the power of Large Language " - "Models (LLMs), even in scenarios without Internet connection. " - "100% private, no data leaves your execution environment at any point.", - contact={ - "url": "https://github.com/imartinez/privateGPT", - }, - license_info={ - "name": "Apache 2.0", - "url": "https://www.apache.org/licenses/LICENSE-2.0.html", - }, - routes=app.routes, - tags=tags_metadata, - ) - openapi_schema["info"]["x-logo"] = { - "url": "https://lh3.googleusercontent.com/drive-viewer" - "/AK7aPaD_iNlMoTquOBsw4boh4tIYxyEuhz6EtEs8nzq3yNkNAK00xGj" - "E1KUCmPJSk3TYOjcs6tReG6w_cLu1S7L_gPgT9z52iw=s2560" - } + if settings.ui.enabled: + logger.debug("Importing the UI module") + from private_gpt.ui.ui import PrivateGptUi - app.openapi_schema = openapi_schema - return app.openapi_schema + ui = root_injector.get(PrivateGptUi) + ui.mount_in_app(app, settings.ui.path) - app.openapi = custom_openapi # type: ignore[method-assign] - - app.include_router(completions_router) - app.include_router(chat_router) - app.include_router(chunks_router) - app.include_router(ingest_router) - app.include_router(embeddings_router) - app.include_router(health_router) - - settings = root_injector.get(Settings) - if settings.server.cors.enabled: - logger.debug("Setting up CORS middleware") - app.add_middleware( - CORSMiddleware, - allow_credentials=settings.server.cors.allow_credentials, - allow_origins=settings.server.cors.allow_origins, - allow_origin_regex=settings.server.cors.allow_origin_regex, - allow_methods=settings.server.cors.allow_methods, - allow_headers=settings.server.cors.allow_headers, - ) - - if settings.ui.enabled: - logger.debug("Importing the UI module") - from private_gpt.ui.ui import PrivateGptUi - - ui = root_injector.get(PrivateGptUi) - ui.mount_in_app(app, settings.ui.path) - - return app + return app From a3ed14c58f77351dbd5f8f2d7868d1642a44f017 Mon Sep 17 00:00:00 2001 From: Louis Melchior Date: Fri, 8 Dec 2023 23:13:51 +0100 Subject: [PATCH 004/127] feat(llm): drop default_system_prompt (#1385) As discussed on Discord, the decision has been made to remove the system prompts by default, to better segregate the API and the UI usages. 
A concurrent PR (#1353) is enabling the dynamic setting of a system prompt in the UI. Therefore, if UI users want to use a custom system prompt, they can specify one directly in the UI. If the API users want to use a custom prompt, they can pass it directly into their messages that they are passing to the API. In the highlight of the two use case above, it becomes clear that default system_prompt does not need to exist. --- private_gpt/components/llm/llm_component.py | 5 +- private_gpt/components/llm/prompt_helper.py | 65 +++++---------------- private_gpt/settings/settings.py | 9 --- tests/test_prompt_helper.py | 34 +---------- 4 files changed, 17 insertions(+), 96 deletions(-) diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index cfe3a73..bcb4ff2 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -23,10 +23,7 @@ class LLMComponent: case "local": from llama_index.llms import LlamaCPP - prompt_style_cls = get_prompt_style(settings.local.prompt_style) - prompt_style = prompt_style_cls( - default_system_prompt=settings.local.default_system_prompt - ) + prompt_style = get_prompt_style(settings.local.prompt_style) self.llm = LlamaCPP( model_path=str(models_path / settings.local.llm_hf_model_file), diff --git a/private_gpt/components/llm/prompt_helper.py b/private_gpt/components/llm/prompt_helper.py index e47b3fb..a8ca60f 100644 --- a/private_gpt/components/llm/prompt_helper.py +++ b/private_gpt/components/llm/prompt_helper.py @@ -5,7 +5,6 @@ from typing import Any, Literal from llama_index.llms import ChatMessage, MessageRole from llama_index.llms.llama_utils import ( - DEFAULT_SYSTEM_PROMPT, completion_to_prompt, messages_to_prompt, ) @@ -29,7 +28,6 @@ class AbstractPromptStyle(abc.ABC): series of messages into a prompt. """ - @abc.abstractmethod def __init__(self, *args: Any, **kwargs: Any) -> None: logger.debug("Initializing prompt_style=%s", self.__class__.__name__) @@ -52,15 +50,6 @@ class AbstractPromptStyle(abc.ABC): return prompt -class AbstractPromptStyleWithSystemPrompt(AbstractPromptStyle, abc.ABC): - _DEFAULT_SYSTEM_PROMPT = DEFAULT_SYSTEM_PROMPT - - def __init__(self, default_system_prompt: str | None) -> None: - super().__init__() - logger.debug("Got default_system_prompt='%s'", default_system_prompt) - self.default_system_prompt = default_system_prompt - - class DefaultPromptStyle(AbstractPromptStyle): """Default prompt style that uses the defaults from llama_utils. @@ -83,7 +72,7 @@ class DefaultPromptStyle(AbstractPromptStyle): return "" -class Llama2PromptStyle(AbstractPromptStyleWithSystemPrompt): +class Llama2PromptStyle(AbstractPromptStyle): """Simple prompt style that just uses the default llama_utils functions. It transforms the sequence of messages into a prompt that should look like: @@ -94,18 +83,14 @@ class Llama2PromptStyle(AbstractPromptStyleWithSystemPrompt): ``` """ - def __init__(self, default_system_prompt: str | None = None) -> None: - # If no system prompt is given, the default one of the implementation is used. 
- super().__init__(default_system_prompt=default_system_prompt) - def _messages_to_prompt(self, messages: Sequence[ChatMessage]) -> str: - return messages_to_prompt(messages, self.default_system_prompt) + return messages_to_prompt(messages) def _completion_to_prompt(self, completion: str) -> str: - return completion_to_prompt(completion, self.default_system_prompt) + return completion_to_prompt(completion) -class TagPromptStyle(AbstractPromptStyleWithSystemPrompt): +class TagPromptStyle(AbstractPromptStyle): """Tag prompt style (used by Vigogne) that uses the prompt style `<|ROLE|>`. It transforms the sequence of messages into a prompt that should look like: @@ -119,37 +104,8 @@ class TagPromptStyle(AbstractPromptStyleWithSystemPrompt): FIXME: should we add surrounding `` and `` tags, like in llama2? """ - def __init__(self, default_system_prompt: str | None = None) -> None: - # We have to define a default system prompt here as the LLM will not - # use the default llama_utils functions. - default_system_prompt = default_system_prompt or self._DEFAULT_SYSTEM_PROMPT - super().__init__(default_system_prompt) - self.system_prompt: str = default_system_prompt - def _messages_to_prompt(self, messages: Sequence[ChatMessage]) -> str: - messages = list(messages) - if messages[0].role != MessageRole.SYSTEM: - logger.info( - "Adding system_promt='%s' to the given messages as there are none given in the session", - self.system_prompt, - ) - messages = [ - ChatMessage(content=self.system_prompt, role=MessageRole.SYSTEM), - *messages, - ] - return self._format_messages_to_prompt(messages) - - def _completion_to_prompt(self, completion: str) -> str: - return ( - f"<|system|>: {self.system_prompt.strip()}\n" - f"<|user|>: {completion.strip()}\n" - "<|assistant|>: " - ) - - @staticmethod - def _format_messages_to_prompt(messages: list[ChatMessage]) -> str: """Format message to prompt with `<|ROLE|>: MSG` style.""" - assert messages[0].role == MessageRole.SYSTEM prompt = "" for message in messages: role = message.role @@ -161,19 +117,24 @@ class TagPromptStyle(AbstractPromptStyleWithSystemPrompt): prompt += "<|assistant|>: " return prompt + def _completion_to_prompt(self, completion: str) -> str: + return self._messages_to_prompt( + [ChatMessage(content=completion, role=MessageRole.USER)] + ) + def get_prompt_style( prompt_style: Literal["default", "llama2", "tag"] | None -) -> type[AbstractPromptStyle]: +) -> AbstractPromptStyle: """Get the prompt style to use from the given string. :param prompt_style: The prompt style to use. :return: The prompt style to use. """ if prompt_style is None or prompt_style == "default": - return DefaultPromptStyle + return DefaultPromptStyle() elif prompt_style == "llama2": - return Llama2PromptStyle + return Llama2PromptStyle() elif prompt_style == "tag": - return TagPromptStyle + return TagPromptStyle() raise ValueError(f"Unknown prompt_style='{prompt_style}'") diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 125396c..5d63103 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -108,15 +108,6 @@ class LocalSettings(BaseModel): "`llama2` is the historic behaviour. `default` might work better with your custom models." ), ) - default_system_prompt: str | None = Field( - None, - description=( - "The default system prompt to use for the chat engine. " - "If none is given - use the default system prompt (from the llama_index). 
" - "Please note that the default prompt might not be the same for all prompt styles. " - "Also note that this is only used if the first message is not a system message. " - ), - ) class EmbeddingSettings(BaseModel): diff --git a/tests/test_prompt_helper.py b/tests/test_prompt_helper.py index 1f22a06..48cac0b 100644 --- a/tests/test_prompt_helper.py +++ b/tests/test_prompt_helper.py @@ -18,7 +18,7 @@ from private_gpt.components.llm.prompt_helper import ( ], ) def test_get_prompt_style_success(prompt_style, expected_prompt_style): - assert get_prompt_style(prompt_style) == expected_prompt_style + assert isinstance(get_prompt_style(prompt_style), expected_prompt_style) def test_get_prompt_style_failure(): @@ -45,20 +45,7 @@ def test_tag_prompt_style_format(): def test_tag_prompt_style_format_with_system_prompt(): - system_prompt = "This is a system prompt from configuration." - prompt_style = TagPromptStyle(default_system_prompt=system_prompt) - messages = [ - ChatMessage(content="Hello, how are you doing?", role=MessageRole.USER), - ] - - expected_prompt = ( - f"<|system|>: {system_prompt}\n" - "<|user|>: Hello, how are you doing?\n" - "<|assistant|>: " - ) - - assert prompt_style.messages_to_prompt(messages) == expected_prompt - + prompt_style = TagPromptStyle() messages = [ ChatMessage( content="FOO BAR Custom sys prompt from messages.", role=MessageRole.SYSTEM @@ -94,22 +81,7 @@ def test_llama2_prompt_style_format(): def test_llama2_prompt_style_with_system_prompt(): - system_prompt = "This is a system prompt from configuration." - prompt_style = Llama2PromptStyle(default_system_prompt=system_prompt) - messages = [ - ChatMessage(content="Hello, how are you doing?", role=MessageRole.USER), - ] - - expected_prompt = ( - " [INST] <>\n" - f" {system_prompt} \n" - "<>\n" - "\n" - " Hello, how are you doing? [/INST]" - ) - - assert prompt_style.messages_to_prompt(messages) == expected_prompt - + prompt_style = Llama2PromptStyle() messages = [ ChatMessage( content="FOO BAR Custom sys prompt from messages.", role=MessageRole.SYSTEM From a072a40a7c987518b9e923fc82b40e9deaa197e4 Mon Sep 17 00:00:00 2001 From: 3ly-13 <143585971+3ly-13@users.noreply.github.com> Date: Sat, 9 Dec 2023 13:13:00 -0600 Subject: [PATCH 005/127] Allow setting OpenAI model in settings (#1386) feat(settings): Allow setting openai model to be used. Default to GPT 3.5 --- fern/docs/pages/manual/llms.mdx | 2 ++ private_gpt/components/llm/llm_component.py | 6 ++++-- private_gpt/settings/settings.py | 4 ++++ settings.yaml | 1 + 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/fern/docs/pages/manual/llms.mdx b/fern/docs/pages/manual/llms.mdx index 6dbb4a6..c9b88e3 100644 --- a/fern/docs/pages/manual/llms.mdx +++ b/fern/docs/pages/manual/llms.mdx @@ -38,6 +38,8 @@ llm: openai: api_key: # You could skip this configuration and use the OPENAI_API_KEY env var instead + model: # Optional model to use. 
Default is "gpt-3.5-turbo" + # Note: Open AI Models are listed here [here](https://platform.openai.com/docs/models) ``` And run PrivateGPT loading that profile you just created: diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index bcb4ff2..88485f4 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -50,7 +50,9 @@ class LLMComponent: case "openai": from llama_index.llms import OpenAI - openai_settings = settings.openai.api_key - self.llm = OpenAI(api_key=openai_settings) + openai_settings = settings.openai + self.llm = OpenAI( + api_key=openai_settings.api_key, model=openai_settings.model + ) case "mock": self.llm = MockLLM() diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 5d63103..f4747d8 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -145,6 +145,10 @@ class SagemakerSettings(BaseModel): class OpenAISettings(BaseModel): api_key: str + model: str = Field( + "gpt-3.5-turbo", + description=("OpenAI Model to use. Example: 'gpt-4'."), + ) class UISettings(BaseModel): diff --git a/settings.yaml b/settings.yaml index 815ed09..036f5bd 100644 --- a/settings.yaml +++ b/settings.yaml @@ -49,3 +49,4 @@ sagemaker: openai: api_key: ${OPENAI_API_KEY:} + model: gpt-3.5-turbo \ No newline at end of file From 145f3ec9f41c4def5abf4065a06fb0786e2d992a Mon Sep 17 00:00:00 2001 From: 3ly-13 <143585971+3ly-13@users.noreply.github.com> Date: Sun, 10 Dec 2023 12:45:14 -0600 Subject: [PATCH 006/127] feat(ui): Allows User to Set System Prompt via "Additional Options" in Chat Interface (#1353) --- fern/docs/pages/manual/llms.mdx | 2 +- fern/docs/pages/manual/ui.mdx | 31 ++++++++++++- private_gpt/settings/settings.py | 9 +++- private_gpt/ui/ui.py | 79 ++++++++++++++++++++++++++------ settings.yaml | 7 +++ 5 files changed, 110 insertions(+), 18 deletions(-) diff --git a/fern/docs/pages/manual/llms.mdx b/fern/docs/pages/manual/llms.mdx index c9b88e3..8b56f75 100644 --- a/fern/docs/pages/manual/llms.mdx +++ b/fern/docs/pages/manual/llms.mdx @@ -39,7 +39,7 @@ llm: openai: api_key: # You could skip this configuration and use the OPENAI_API_KEY env var instead model: # Optional model to use. Default is "gpt-3.5-turbo" - # Note: Open AI Models are listed here [here](https://platform.openai.com/docs/models) + # Note: Open AI Models are listed here: https://platform.openai.com/docs/models ``` And run PrivateGPT loading that profile you just created: diff --git a/fern/docs/pages/manual/ui.mdx b/fern/docs/pages/manual/ui.mdx index ddc4d04..ed095fe 100644 --- a/fern/docs/pages/manual/ui.mdx +++ b/fern/docs/pages/manual/ui.mdx @@ -35,5 +35,32 @@ database* section in the documentation. Normal chat interface, self-explanatory ;) -You can check the actual prompt being passed to the LLM by looking at the logs of -the server. We'll add better observability in future releases. \ No newline at end of file +#### System Prompt +You can view and change the system prompt being passed to the LLM by clicking "Additional Inputs" +in the chat interface. The system prompt is also logged on the server. + +By default, the `Query Docs` mode uses the setting value `ui.default_query_system_prompt`. + +The `LLM Chat` mode attempts to use the optional settings value `ui.default_chat_system_prompt`. + +If no system prompt is entered, the UI will display the default system prompt being used +for the active mode. 
+ +##### System Prompt Examples: + +The system prompt can effectively provide your chat bot specialized roles, and results tailored to the prompt +you have given the model. Examples of system prompts can be be found +[here](https://www.w3schools.com/gen_ai/chatgpt-3-5/chatgpt-3-5_roles.php). + +Some interesting examples to try include: + +* You are -X-. You have all the knowledge and personality of -X-. Answer as if you were -X- using +their manner of speaking and vocabulary. + * Example: You are Shakespeare. You have all the knowledge and personality of Shakespeare. + Answer as if you were Shakespeare using their manner of speaking and vocabulary. +* You are an expert (at) -role-. Answer all questions using your expertise on -specific domain topic-. + * Example: You are an expert software engineer. Answer all questions using your expertise on Python. +* You are a -role- bot, respond with -response criteria needed-. If no -response criteria- is needed, +respond with -alternate response-. + * Example: You are a grammar checking bot, respond with any grammatical corrections needed. If no corrections + are needed, respond with "verified". \ No newline at end of file diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index f4747d8..8b03f61 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -147,13 +147,20 @@ class OpenAISettings(BaseModel): api_key: str model: str = Field( "gpt-3.5-turbo", - description=("OpenAI Model to use. Example: 'gpt-4'."), + description="OpenAI Model to use. Example: 'gpt-4'.", ) class UISettings(BaseModel): enabled: bool path: str + default_chat_system_prompt: str = Field( + None, + description="The default system prompt to use for the chat mode.", + ) + default_query_system_prompt: str = Field( + None, description="The default system prompt to use for the query mode." + ) class QdrantSettings(BaseModel): diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index eeddb0f..ad6052b 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -30,6 +30,8 @@ UI_TAB_TITLE = "My Private GPT" SOURCES_SEPARATOR = "\n\n Sources: \n" +MODES = ["Query Docs", "Search in Docs", "LLM Chat"] + class Source(BaseModel): file: str @@ -71,6 +73,10 @@ class PrivateGptUi: # Cache the UI blocks self._ui_block = None + # Initialize system prompt based on default mode + self.mode = MODES[0] + self._system_prompt = self._get_default_system_prompt(self.mode) + def _chat(self, message: str, history: list[list[str]], mode: str, *_: Any) -> Any: def yield_deltas(completion_gen: CompletionGen) -> Iterable[str]: full_response: str = "" @@ -114,25 +120,22 @@ class PrivateGptUi: new_message = ChatMessage(content=message, role=MessageRole.USER) all_messages = [*build_history(), new_message] + # If a system prompt is set, add it as a system message + if self._system_prompt: + all_messages.insert( + 0, + ChatMessage( + content=self._system_prompt, + role=MessageRole.SYSTEM, + ), + ) match mode: case "Query Docs": - # Add a system message to force the behaviour of the LLM - # to answer only questions about the provided context. - all_messages.insert( - 0, - ChatMessage( - content="You can only answer questions about the provided context. 
If you know the answer " - "but it is not based in the provided context, don't provide the answer, just state " - "the answer is not in the context provided.", - role=MessageRole.SYSTEM, - ), - ) query_stream = self._chat_service.stream_chat( messages=all_messages, use_context=True, ) yield from yield_deltas(query_stream) - case "LLM Chat": llm_stream = self._chat_service.stream_chat( messages=all_messages, @@ -154,6 +157,37 @@ class PrivateGptUi: for index, source in enumerate(sources, start=1) ) + # On initialization and on mode change, this function set the system prompt + # to the default prompt based on the mode (and user settings). + @staticmethod + def _get_default_system_prompt(mode: str) -> str: + p = "" + match mode: + # For query chat mode, obtain default system prompt from settings + case "Query Docs": + p = settings().ui.default_query_system_prompt + # For chat mode, obtain default system prompt from settings + case "LLM Chat": + p = settings().ui.default_chat_system_prompt + # For any other mode, clear the system prompt + case _: + p = "" + return p + + def _set_system_prompt(self, system_prompt_input: str) -> None: + logger.info(f"Setting system prompt to: {system_prompt_input}") + self._system_prompt = system_prompt_input + + def _set_current_mode(self, mode: str) -> Any: + self.mode = mode + self._set_system_prompt(self._get_default_system_prompt(mode)) + # Update placeholder and allow interaction if default system prompt is set + if self._system_prompt: + return gr.update(placeholder=self._system_prompt, interactive=True) + # Update placeholder and disable interaction if no default system prompt is set + else: + return gr.update(placeholder=self._system_prompt, interactive=False) + def _list_ingested_files(self) -> list[list[str]]: files = set() for ingested_document in self._ingest_service.list_ingested(): @@ -193,7 +227,7 @@ class PrivateGptUi: with gr.Row(): with gr.Column(scale=3, variant="compact"): mode = gr.Radio( - ["Query Docs", "Search in Docs", "LLM Chat"], + MODES, label="Mode", value="Query Docs", ) @@ -220,6 +254,23 @@ class PrivateGptUi: outputs=ingested_dataset, ) ingested_dataset.render() + system_prompt_input = gr.Textbox( + placeholder=self._system_prompt, + label="System Prompt", + lines=2, + interactive=True, + render=False, + ) + # When mode changes, set default system prompt + mode.change( + self._set_current_mode, inputs=mode, outputs=system_prompt_input + ) + # On blur, set system prompt to use in queries + system_prompt_input.blur( + self._set_system_prompt, + inputs=system_prompt_input, + ) + with gr.Column(scale=7): _ = gr.ChatInterface( self._chat, @@ -232,7 +283,7 @@ class PrivateGptUi: AVATAR_BOT, ), ), - additional_inputs=[mode, upload_button], + additional_inputs=[mode, upload_button, system_prompt_input], ) return blocks diff --git a/settings.yaml b/settings.yaml index 036f5bd..af51a7f 100644 --- a/settings.yaml +++ b/settings.yaml @@ -22,6 +22,13 @@ data: ui: enabled: true path: / + default_chat_system_prompt: "You are a helpful, respectful and honest assistant. + Always answer as helpfully as possible and follow ALL given instructions. + Do not speculate or make up information. + Do not reference any given instructions or context." + default_query_system_prompt: "You can only answer questions about the provided context. + If you know the answer but it is not based in the provided context, don't provide + the answer, just state the answer is not in the context provided." 
llm: mode: local From e8ac51bba4b698c8a66dfd02bda5020f4a08f0cd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 10 Dec 2023 20:08:12 +0100 Subject: [PATCH 007/127] chore(main): release 0.2.0 (#1387) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 8 ++++++++ version.txt | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f589e4..2f55e55 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## [0.2.0](https://github.com/imartinez/privateGPT/compare/v0.1.0...v0.2.0) (2023-12-10) + + +### Features + +* **llm:** drop default_system_prompt ([#1385](https://github.com/imartinez/privateGPT/issues/1385)) ([a3ed14c](https://github.com/imartinez/privateGPT/commit/a3ed14c58f77351dbd5f8f2d7868d1642a44f017)) +* **ui:** Allows User to Set System Prompt via "Additional Options" in Chat Interface ([#1353](https://github.com/imartinez/privateGPT/issues/1353)) ([145f3ec](https://github.com/imartinez/privateGPT/commit/145f3ec9f41c4def5abf4065a06fb0786e2d992a)) + ## [0.1.0](https://github.com/imartinez/privateGPT/compare/v0.0.2...v0.1.0) (2023-11-30) diff --git a/version.txt b/version.txt index 6e8bf73..0ea3a94 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.1.0 +0.2.0 From 1d28ae2915587ae3c582059b6b89467d82edfa77 Mon Sep 17 00:00:00 2001 From: Federico Grandi Date: Tue, 12 Dec 2023 20:31:38 +0100 Subject: [PATCH 008/127] docs: fix minor capitalization typo (#1392) --- README.md | 4 ++-- fern/docs.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 52bf032..d721b3d 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,7 @@ Don't know what to contribute? Here is the public [Project Board](https://github.com/users/imartinez/projects/3) with several ideas. Head over to Discord -#contributors channel and ask for write permissions on that Github project. +#contributors channel and ask for write permissions on that GitHub project. ## 💬 Community Join the conversation around PrivateGPT on our: @@ -158,4 +158,4 @@ This project has been strongly influenced and supported by other amazing project [GPT4All](https://github.com/nomic-ai/gpt4all), [LlamaCpp](https://github.com/ggerganov/llama.cpp), [Chroma](https://www.trychroma.com/) -and [SentenceTransformers](https://www.sbert.net/). \ No newline at end of file +and [SentenceTransformers](https://www.sbert.net/). 
diff --git a/fern/docs.yml b/fern/docs.yml index d3b0025..6702167 100644 --- a/fern/docs.yml +++ b/fern/docs.yml @@ -89,7 +89,7 @@ navigation: # `type:primary` is always displayed at the most right side of the navbar navbar-links: - type: secondary - text: Github + text: GitHub url: "https://github.com/imartinez/privateGPT" - type: secondary text: Contact us From 3582764801c692f53143a2f895c87005f845a4b7 Mon Sep 17 00:00:00 2001 From: Federico Grandi Date: Tue, 12 Dec 2023 20:33:34 +0100 Subject: [PATCH 009/127] ci: fix preview docs checkout ref (#1393) --- .github/workflows/preview-docs.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/preview-docs.yml b/.github/workflows/preview-docs.yml index 21ecf95..b865777 100644 --- a/.github/workflows/preview-docs.yml +++ b/.github/workflows/preview-docs.yml @@ -14,6 +14,8 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4 + with: + ref: refs/pull/${{ github.event.pull_request.number }}/merge - name: Setup Node.js uses: actions/setup-node@v4 From 4e496e970a3263f28608cb9d4774200f168e3e2f Mon Sep 17 00:00:00 2001 From: Eliott Bouhana <47679741+eliottness@users.noreply.github.com> Date: Fri, 15 Dec 2023 21:35:02 +0100 Subject: [PATCH 010/127] docs: remove misleading comment about pgpt working with python 3.12 (#1394) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I was misled into believing I could install using python 3.12 whereas the pyproject.toml explicitly states otherwise. This PR only removes this comment to make sure other people are not also trapped 😄 --- fern/docs/pages/installation/installation.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fern/docs/pages/installation/installation.mdx b/fern/docs/pages/installation/installation.mdx index baad4a4..c45bb0d 100644 --- a/fern/docs/pages/installation/installation.mdx +++ b/fern/docs/pages/installation/installation.mdx @@ -10,7 +10,7 @@ ``` * Install Python `3.11` (*if you do not have it already*). Ideally through a python version manager like `pyenv`. - Python 3.12 should work too. Earlier python versions are not supported. + Earlier python versions are not supported. * osx/linux: [pyenv](https://github.com/pyenv/pyenv) * windows: [pyenv-win](https://github.com/pyenv-win/pyenv-win) @@ -232,4 +232,4 @@ To install a C++ compiler on Windows 10/11, follow these steps: When running a Mac with Intel hardware (not M1), you may run into _clang: error: the clang compiler does not support ' -march=native'_ during pip install. -If so set your archflags during pip install. eg: _ARCHFLAGS="-arch x86_64" pip3 install -r requirements.txt_ \ No newline at end of file +If so set your archflags during pip install. eg: _ARCHFLAGS="-arch x86_64" pip3 install -r requirements.txt_ From 2564f8d2bb8c4332a6a0ab6d722a2ac15006b85f Mon Sep 17 00:00:00 2001 From: cognitivetech Date: Sat, 16 Dec 2023 13:02:46 -0500 Subject: [PATCH 011/127] fix(settings): correct yaml multiline string (#1403) --- settings.yaml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/settings.yaml b/settings.yaml index af51a7f..9d6ed4a 100644 --- a/settings.yaml +++ b/settings.yaml @@ -22,13 +22,15 @@ data: ui: enabled: true path: / - default_chat_system_prompt: "You are a helpful, respectful and honest assistant. + default_chat_system_prompt: > + You are a helpful, respectful and honest assistant. Always answer as helpfully as possible and follow ALL given instructions. 
Do not speculate or make up information. - Do not reference any given instructions or context." - default_query_system_prompt: "You can only answer questions about the provided context. - If you know the answer but it is not based in the provided context, don't provide - the answer, just state the answer is not in the context provided." + Do not reference any given instructions or context. + default_query_system_prompt: > + You can only answer questions about the provided context. + If you know the answer but it is not based in the provided context, don't provide + the answer, just state the answer is not in the context provided. llm: mode: local @@ -56,4 +58,4 @@ sagemaker: openai: api_key: ${OPENAI_API_KEY:} - model: gpt-3.5-turbo \ No newline at end of file + model: gpt-3.5-turbo From c71ae7cee92463bbc5ea9c434eab9f99166e1363 Mon Sep 17 00:00:00 2001 From: Rohit Das <43847374+therohitdas@users.noreply.github.com> Date: Sun, 17 Dec 2023 16:32:13 +0530 Subject: [PATCH 012/127] feat(ui): make chat area stretch to fill the screen (#1397) --- private_gpt/ui/ui.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index ad6052b..c23bd37 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -219,13 +219,17 @@ class PrivateGptUi: "justify-content: center;" "align-items: center;" "}" - ".logo img { height: 25% }", + ".logo img { height: 25% }" + ".contain { display: flex !important; flex-direction: column !important; }" + "#component-0, #component-3, #component-10, #component-8 { height: 100% !important; }" + "#chatbot { flex-grow: 1 !important; overflow: auto !important;}" + "#col { height: calc(100vh - 112px - 16px) !important; }", ) as blocks: with gr.Row(): gr.HTML(f"