Mirror of https://github.com/zylon-ai/private-gpt.git
fix: Adding an LLM param to fix broken generator from llamacpp (#1519)
Parent: e326126d0d
Commit: 869233f0e4
1 changed file with 1 addition and 1 deletion
@@ -42,7 +42,7 @@ class LLMComponent:
                     context_window=settings.llm.context_window,
                     generate_kwargs={},
                     # All to GPU
-                    model_kwargs={"n_gpu_layers": -1},
+                    model_kwargs={"n_gpu_layers": -1, "offload_kqv": True},
                     # transform inputs into Llama2 format
                     messages_to_prompt=prompt_style.messages_to_prompt,
                     completion_to_prompt=prompt_style.completion_to_prompt,
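
For context, model_kwargs here are the keyword arguments that the llama-index LlamaCPP wrapper forwards to llama-cpp-python's Llama constructor (an assumption about the wrapper's behavior at the time, not stated in the diff), so the change is roughly equivalent to the sketch below. The model path and context size are illustrative values, not taken from private-gpt's settings; only n_gpu_layers and offload_kqv mirror the diff.

# Hedged sketch: what the updated model_kwargs amount to at the llama-cpp-python level.
from llama_cpp import Llama

llm = Llama(
    model_path="models/llama-2-7b-chat.Q4_K_M.gguf",  # illustrative model file
    n_ctx=3900,          # context window (illustrative value)
    n_gpu_layers=-1,     # offload every layer to the GPU ("All to GPU")
    offload_kqv=True,    # also keep the K/Q/V cache on the GPU -- the parameter this commit adds
)

# Quick smoke test: a short completion confirms the generator produces output.
print(llm("Q: What is PrivateGPT? A:", max_tokens=32)["choices"][0]["text"])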