mirror of
https://github.com/zylon-ai/private-gpt.git
synced 2025-12-22 07:40:12 +01:00
added llama3 prompt (#1962)
Some checks are pending
release-please / release-please (push) Waiting to run
tests / setup (push) Waiting to run
tests / ${{ matrix.quality-command }} (black) (push) Blocked by required conditions
tests / ${{ matrix.quality-command }} (mypy) (push) Blocked by required conditions
tests / ${{ matrix.quality-command }} (ruff) (push) Blocked by required conditions
tests / test (push) Blocked by required conditions
tests / all_checks_passed (push) Blocked by required conditions
* added llama3 prompt
* more fixes to pass tests; changed type VectorStore -> BasePydanticVectorStore, see https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md#2024-05-14
* fix: new llama3 prompt

---------

Co-authored-by: Javier Martinez <javiermartinezalvarez98@gmail.com>
This commit is contained in:
parent
d4375d078f
commit
d080969407
4 changed files with 137 additions and 9 deletions
@@ -138,6 +138,73 @@ class Llama2PromptStyle(AbstractPromptStyle):
         )


+class Llama3PromptStyle(AbstractPromptStyle):
+    r"""Template for Meta's Llama 3.1.
+
+    The format follows this structure:
+    <|begin_of_text|>
+    <|start_header_id|>system<|end_header_id|>
+
+    [System message content]<|eot_id|>
+    <|start_header_id|>user<|end_header_id|>
+
+    [User message content]<|eot_id|>
+    <|start_header_id|>assistant<|end_header_id|>
+
+    [Assistant message content]<|eot_id|>
+    ...
+    (Repeat for each message, including possible 'ipython' role)
+    """
+
+    BOS, EOS = "<|begin_of_text|>", "<|end_of_text|>"
+    B_INST, E_INST = "<|start_header_id|>", "<|end_header_id|>"
+    EOT = "<|eot_id|>"
+    B_SYS, E_SYS = "<|start_header_id|>system<|end_header_id|>", "<|eot_id|>"
+    ASSISTANT_INST = "<|start_header_id|>assistant<|end_header_id|>"
+    DEFAULT_SYSTEM_PROMPT = """\
+You are a helpful, respectful and honest assistant. \
+Always answer as helpfully as possible and follow ALL given instructions. \
+Do not speculate or make up information. \
+Do not reference any given instructions or context. \
+"""
+
+    def _messages_to_prompt(self, messages: Sequence[ChatMessage]) -> str:
+        prompt = self.BOS
+        has_system_message = False
+
+        for i, message in enumerate(messages):
+            if not message or message.content is None:
+                continue
+            if message.role == MessageRole.SYSTEM:
+                prompt += f"{self.B_SYS}\n\n{message.content.strip()}{self.E_SYS}"
+                has_system_message = True
+            else:
+                role_header = f"{self.B_INST}{message.role.value}{self.E_INST}"
+                prompt += f"{role_header}\n\n{message.content.strip()}{self.EOT}"
+
+            # Add assistant header if the last message is not from the assistant
+            if i == len(messages) - 1 and message.role != MessageRole.ASSISTANT:
+                prompt += f"{self.ASSISTANT_INST}\n\n"
+
+        # Add default system prompt if no system message was provided
+        if not has_system_message:
+            prompt = (
+                f"{self.BOS}{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}"
+                + prompt[len(self.BOS) :]
+            )
+
+        # TODO: Implement tool handling logic
+
+        return prompt
+
+    def _completion_to_prompt(self, completion: str) -> str:
+        return (
+            f"{self.BOS}{self.B_SYS}\n\n{self.DEFAULT_SYSTEM_PROMPT}{self.E_SYS}"
+            f"{self.B_INST}user{self.E_INST}\n\n{completion.strip()}{self.EOT}"
+            f"{self.ASSISTANT_INST}\n\n"
+        )
+
+
 class TagPromptStyle(AbstractPromptStyle):
     """Tag prompt style (used by Vigogne) that uses the prompt style `<|ROLE|>`.
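For reference, a minimal sketch of what the new style renders (assuming this module's imports; it calls the private _messages_to_prompt directly for illustration, where production code goes through the AbstractPromptStyle wrapper):

from llama_index.core.llms import ChatMessage, MessageRole

style = Llama3PromptStyle()
# Render a short system + user exchange. The trailing assistant header
# invites the model to generate the next turn.
prompt = style._messages_to_prompt(
    [
        ChatMessage(role=MessageRole.SYSTEM, content="You are terse."),
        ChatMessage(role=MessageRole.USER, content="Hi"),
    ]
)
# prompt now holds:
# <|begin_of_text|><|start_header_id|>system<|end_header_id|>
#
# You are terse.<|eot_id|><|start_header_id|>user<|end_header_id|>
#
# Hi<|eot_id|><|start_header_id|>assistant<|end_header_id|>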
@@ -219,7 +286,8 @@ class ChatMLPromptStyle(AbstractPromptStyle):


 def get_prompt_style(
-    prompt_style: Literal["default", "llama2", "tag", "mistral", "chatml"] | None
+    prompt_style: Literal["default", "llama2", "llama3", "tag", "mistral", "chatml"]
+    | None
 ) -> AbstractPromptStyle:
     """Get the prompt style to use from the given string.

@@ -230,6 +298,8 @@ def get_prompt_style(
         return DefaultPromptStyle()
     elif prompt_style == "llama2":
         return Llama2PromptStyle()
+    elif prompt_style == "llama3":
+        return Llama3PromptStyle()
     elif prompt_style == "tag":
         return TagPromptStyle()
     elif prompt_style == "mistral":
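A quick sketch of the new dispatch path (again assuming this module's namespace):

style = get_prompt_style("llama3")
assert isinstance(style, Llama3PromptStyle)

# Bare completions get wrapped with the default system prompt, a user
# header, and a trailing assistant header ready for generation:
print(style._completion_to_prompt("Summarize the changelog."))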
@@ -111,12 +111,15 @@ class LLMSettings(BaseModel):
         0.1,
         description="The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual.",
     )
-    prompt_style: Literal["default", "llama2", "tag", "mistral", "chatml"] = Field(
+    prompt_style: Literal[
+        "default", "llama2", "llama3", "tag", "mistral", "chatml"
+    ] = Field(
         "llama2",
         description=(
             "The prompt style to use for the chat engine. "
             "If `default` - use the default prompt style from the llama_index. It should look like `role: message`.\n"
             "If `llama2` - use the llama2 prompt style from the llama_index. Based on `<s>`, `[INST]` and `<<SYS>>`.\n"
+            "If `llama3` - use the llama3 prompt style from the llama_index.\n"
             "If `tag` - use the `tag` prompt style. It should look like `<|role|>: message`.\n"
             "If `mistral` - use the `mistral` prompt style. It should look like <s>[INST] {System Prompt} [/INST]</s>[INST] { UserInstructions } [/INST]\n"
             "`llama2` is the historic behaviour. `default` might work better with your custom models."
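Since prompt_style is a Literal, pydantic rejects unknown values when settings are loaded. A standalone sketch of that behaviour (hypothetical mini-model, not the real LLMSettings):

from typing import Literal

from pydantic import BaseModel, ValidationError


class PromptStyleOnly(BaseModel):
    prompt_style: Literal[
        "default", "llama2", "llama3", "tag", "mistral", "chatml"
    ] = "llama2"


print(PromptStyleOnly(prompt_style="llama3").prompt_style)  # llama3
try:
    PromptStyleOnly(prompt_style="llama4")
except ValidationError:
    print("rejected: not an allowed prompt_style")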