diff --git a/settings-docker.yaml b/settings-docker.yaml
index c68da41..3d78ff7 100644
--- a/settings-docker.yaml
+++ b/settings-docker.yaml
@@ -9,8 +9,8 @@ embedding:
   mode: ${PGPT_MODE:sagemaker}

 llamacpp:
-  llm_hf_repo_id: ${PGPT_HF_REPO_ID:TheBloke/Mistral-7B-Instruct-v0.1-GGUF}
-  llm_hf_model_file: ${PGPT_HF_MODEL_FILE:mistral-7b-instruct-v0.1.Q4_K_M.gguf}
+  llm_hf_repo_id: ${PGPT_HF_REPO_ID:lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF}
+  llm_hf_model_file: ${PGPT_HF_MODEL_FILE:Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf}

 huggingface:
   embedding_hf_model_name: ${PGPT_EMBEDDING_HF_MODEL_NAME:BAAI/bge-small-en-v1.5}
diff --git a/settings-local.yaml b/settings-local.yaml
index 48eeb0e..596d7bc 100644
--- a/settings-local.yaml
+++ b/settings-local.yaml
@@ -7,12 +7,12 @@ llm:
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
-  prompt_style: "mistral"
+  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
+  prompt_style: "llama3"

 llamacpp:
-  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
-  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf
+  llm_hf_repo_id: lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF
+  llm_hf_model_file: Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf

 embedding:
   mode: huggingface
diff --git a/settings-vllm.yaml b/settings-vllm.yaml
index 1bfab6b..db4ef75 100644
--- a/settings-vllm.yaml
+++ b/settings-vllm.yaml
@@ -4,7 +4,7 @@ server:
 llm:
   mode: openailike
   max_new_tokens: 512
-  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
   temperature: 0.1

 embedding:
diff --git a/settings.yaml b/settings.yaml
index 935defc..3f7a989 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -36,12 +36,12 @@ ui:

 llm:
   mode: llamacpp
-  prompt_style: "mistral"
+  prompt_style: "llama3"
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
   # Select your tokenizer. Llama-index tokenizer is the default.
-  # tokenizer: mistralai/Mistral-7B-Instruct-v0.2
+  # tokenizer: meta-llama/Meta-Llama-3.1-8B-Instruct
   temperature: 0.1      # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)

 rag:
@@ -62,8 +62,8 @@ clickhouse:
   database: embeddings

 llamacpp:
-  llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF
-  llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf
+  llm_hf_repo_id: lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF
+  llm_hf_model_file: Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf
   tfs_z: 1.0      # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting
   top_k: 40       # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
   top_p: 1.0      # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)