Mirror of https://github.com/zylon-ai/private-gpt.git (synced 2025-12-22 04:30:11 +01:00)
feat: make llama3.1 as default (#2022)
* feat: change ollama default model to llama3.1
* chore: bump versions
* feat: change default model in local mode to llama3.1
* chore: make sure the latest poetry version is used
* fix: mypy
* fix: do not add BOS (with the latest llama-cpp-python version)
Parent: e54a8fe043
Commit: 9027d695c1
15 changed files with 2227 additions and 2419 deletions
@@ -9,8 +9,8 @@ embedding:
   mode: ${PGPT_EMBED_MODE:mock}
 
 llamacpp:
-  llm_hf_repo_id: ${PGPT_HF_REPO_ID:TheBloke/Mistral-7B-Instruct-v0.1-GGUF}
-  llm_hf_model_file: ${PGPT_HF_MODEL_FILE:mistral-7b-instruct-v0.1.Q4_K_M.gguf}
+  llm_hf_repo_id: ${PGPT_HF_REPO_ID:lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF}
+  llm_hf_model_file: ${PGPT_HF_MODEL_FILE:Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf}
 
 huggingface:
   embedding_hf_model_name: ${PGPT_EMBEDDING_HF_MODEL_NAME:BAAI/bge-small-en-v1.5}

@@ -20,7 +20,7 @@ sagemaker:
   embedding_endpoint_name: ${PGPT_SAGEMAKER_EMBEDDING_ENDPOINT_NAME:}
 
 ollama:
-  llm_model: ${PGPT_OLLAMA_LLM_MODEL:mistral}
+  llm_model: ${PGPT_OLLAMA_LLM_MODEL:llama3.1}
   embedding_model: ${PGPT_OLLAMA_EMBEDDING_MODEL:nomic-embed-text}
   api_base: ${PGPT_OLLAMA_API_BASE:http://ollama:11434}
   embedding_api_base: ${PGPT_OLLAMA_EMBEDDING_API_BASE:http://ollama:11434}
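All of the changed defaults use the ${ENV_VAR:default} placeholders shown above, which appear to fall back to the value after the colon when the variable is unset, so the previous Mistral default can still be selected without editing the settings file. A minimal sketch of such an override (the docker-compose layout and service name are assumptions for illustration, not part of this commit):

    # Hypothetical docker-compose override: with PGPT_OLLAMA_LLM_MODEL set,
    # `llm_model: ${PGPT_OLLAMA_LLM_MODEL:llama3.1}` resolves to "mistral"
    # instead of the new llama3.1 default.
    services:
      private-gpt:
        environment:
          PGPT_OLLAMA_LLM_MODEL: mistral
          PGPT_OLLAMA_EMBEDDING_MODEL: nomic-embed-text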