Mirror of https://github.com/zylon-ai/private-gpt.git (synced 2025-12-22 10:45:42 +01:00)
Multi language support - fern debug (#1307)
Co-authored-by: Louis <lpglm@orange.fr>
Co-authored-by: LeMoussel <cnhx27@gmail.com>
This commit is contained in:
parent e8d88f8952, commit 944c43bfa8
10 changed files with 402 additions and 8 deletions
```diff
@@ -91,7 +91,28 @@ class VectorstoreSettings(BaseModel):
 class LocalSettings(BaseModel):
     llm_hf_repo_id: str
     llm_hf_model_file: str
-    embedding_hf_model_name: str
+    embedding_hf_model_name: str = Field(
+        description="Name of the HuggingFace model to use for embeddings"
+    )
+    prompt_style: Literal["default", "llama2", "tag"] = Field(
+        "llama2",
+        description=(
+            "The prompt style to use for the chat engine. "
+            "If `default` - use the default prompt style from the llama_index. It should look like `role: message`.\n"
+            "If `llama2` - use the llama2 prompt style from the llama_index. Based on `<s>`, `[INST]` and `<<SYS>>`.\n"
+            "If `tag` - use the `tag` prompt style. It should look like `<|role|>: message`. \n"
+            "`llama2` is the historic behaviour. `default` might work better with your custom models."
+        ),
+    )
+    default_system_prompt: str | None = Field(
+        None,
+        description=(
+            "The default system prompt to use for the chat engine. "
+            "If none is given - use the default system prompt (from the llama_index). "
+            "Please note that the default prompt might not be the same for all prompt styles. "
+            "Also note that this is only used if the first message is not a system message. "
+        ),
+    )
 
 
 class SagemakerSettings(BaseModel):
```
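The three prompt styles are only characterized here by the field's help text. As a rough illustration of what those descriptions imply, the sketch below renders a short chat in each style; the `format_messages` helper and the exact token placement are assumptions based solely on the description strings, not the prompt helpers this commit actually adds elsewhere.

```python
from typing import Literal

# Hypothetical sketch only: render a chat in each of the three styles named by
# the new `prompt_style` field. Token placement is inferred from the field
# description, not taken from this commit's implementation.
Message = tuple[str, str]  # (role, content)


def format_messages(style: Literal["default", "llama2", "tag"], messages: list[Message]) -> str:
    if style == "default":
        # "role: message" lines, as the description says llama_index's default looks.
        return "\n".join(f"{role}: {content}" for role, content in messages)
    if style == "tag":
        # "<|role|>: message" lines.
        return "\n".join(f"<|{role}|>: {content}" for role, content in messages)
    # "llama2": built around the <s>, [INST] and <<SYS>> markers.
    system = next((content for role, content in messages if role == "system"), "")
    user = next((content for role, content in messages if role == "user"), "")
    return f"<s>[INST] <<SYS>>\n{system}\n<</SYS>>\n\n{user} [/INST]"


print(format_messages("tag", [("system", "Be brief."), ("user", "Hello!")]))
# <|system|>: Be brief.
# <|user|>: Hello!
```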
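The `default_system_prompt` description states the prompt is only applied when the conversation does not already start with a system message. A minimal sketch of that rule, with an illustrative helper name that is not taken from the commit:

```python
# Hypothetical sketch of the rule stated in the field description: inject the
# configured default system prompt only when the chat does not already begin
# with a system message. The helper name is illustrative, not from this commit.
def apply_default_system_prompt(
    messages: list[tuple[str, str]],  # (role, content) pairs
    default_system_prompt: str | None,
) -> list[tuple[str, str]]:
    if default_system_prompt is None:
        return messages
    if messages and messages[0][0] == "system":
        # The caller already provided its own system message; leave it untouched.
        return messages
    return [("system", default_system_prompt), *messages]
```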