Mirror of https://github.com/zylon-ai/private-gpt.git, synced 2025-12-22 10:45:42 +01:00
feat(llm): adds several settings for llamacpp and ollama (#1703)
This commit is contained in:
parent 410bf7a71f
commit 02dc83e8e9
10 changed files with 91 additions and 8 deletions
@@ -5,6 +5,7 @@ llm:
  mode: ollama
  max_new_tokens: 512
  context_window: 3900
  temperature: 0.1      # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)

embedding:
  mode: ollama
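The temperature key under llm: sits alongside the existing max_new_tokens and context_window settings. As a quick sanity check, here is a minimal sketch of reading this block with PyYAML; the file name settings-ollama.yaml and the flat dictionary access are assumptions for illustration, not private-gpt's actual settings loader:

```python
# Illustrative sketch only: read the llm block from the ollama settings profile.
# Assumes the file is named settings-ollama.yaml and sits in the current directory.
import yaml

with open("settings-ollama.yaml") as f:
    settings = yaml.safe_load(f)

llm = settings["llm"]
print(llm["mode"])            # ollama
print(llm["max_new_tokens"])  # 512
print(llm["context_window"])  # 3900
print(llm["temperature"])     # 0.1
```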
@@ -13,10 +14,14 @@ ollama:
  llm_model: mistral
  embedding_model: nomic-embed-text
  api_base: http://localhost:11434
  tfs_z: 1.0            # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.
  top_k: 40             # Reduces the probability of generating nonsense. A higher value (e.g., 100) will give more diverse answers, while a lower value (e.g., 10) will be more conservative. (Default: 40)
  top_p: 0.9            # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
  repeat_last_n: 64     # Sets how far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)
  repeat_penalty: 1.2   # Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)

vectorstore:
  database: qdrant

qdrant:
  path: local_data/private_gpt/qdrant
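The sampling keys under ollama: mirror Ollama's own generation options. Below is a minimal sketch of how such values could be forwarded to the /api/generate endpoint of a local Ollama server at the api_base above; the prompt and the use of the requests library are illustrative assumptions, not the code path this commit touches:

```python
# Illustrative sketch only: pass the sampling settings above as Ollama "options".
# Assumes an Ollama server is listening on the api_base from the config.
import requests

options = {
    "temperature": 0.1,
    "tfs_z": 1.0,
    "top_k": 40,
    "top_p": 0.9,
    "repeat_last_n": 64,
    "repeat_penalty": 1.2,
    "num_ctx": 3900,  # context_window from the llm block
}

resp = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "mistral",  # llm_model from the config
        "prompt": "Why is the sky blue?",
        "stream": False,
        "options": options,
    },
    timeout=120,
)
print(resp.json()["response"])
```

With stream set to false, Ollama returns the whole completion as a single JSON object, which keeps the sketch short.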