Mirror of https://github.com/zylon-ai/private-gpt.git (synced 2025-12-22 10:45:42 +01:00)
Ingestion Speedup Multiple strategy (#1309)
This commit is contained in:
parent 546ba33e6f
commit bafdd3baf1
13 changed files with 515 additions and 195 deletions
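The commit title points at the core idea: ingestion is sped up by letting the user choose between several ingestion strategies instead of a single fixed pipeline. The sketch below is a minimal, hypothetical illustration of that pattern; the class and function names (IngestComponent, SimpleIngest, BatchIngest, get_ingest_component) and the mode strings are assumptions for illustration, not code from this commit.

# Hypothetical sketch of strategy selection for ingestion; names are illustrative.
from abc import ABC, abstractmethod


class IngestComponent(ABC):
    """Common interface shared by the ingestion strategies."""

    @abstractmethod
    def ingest(self, file_paths: list[str]) -> None: ...


class SimpleIngest(IngestComponent):
    def ingest(self, file_paths: list[str]) -> None:
        # Process one file at a time: slower, but with predictable memory use.
        for path in file_paths:
            print(f"ingesting {path}")


class BatchIngest(IngestComponent):
    def __init__(self, batch_size: int = 8) -> None:
        self.batch_size = batch_size

    def ingest(self, file_paths: list[str]) -> None:
        # Group files so embeddings can be computed in larger batches.
        for i in range(0, len(file_paths), self.batch_size):
            batch = file_paths[i : i + self.batch_size]
            print(f"ingesting a batch of {len(batch)} files")


def get_ingest_component(mode: str) -> IngestComponent:
    # Dispatch on the configured mode, defaulting to the simple strategy.
    match mode:
        case "batch":
            return BatchIngest()
        case _:
            return SimpleIngest()

A config-driven dispatch like this keeps each strategy isolated behind one interface, so faster strategies can be added without touching callers.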
@@ -1,3 +1,5 @@
+import logging
+
 from injector import inject, singleton
 from llama_index.llms import MockLLM
 from llama_index.llms.base import LLM
@@ -6,6 +8,8 @@ from private_gpt.components.llm.prompt_helper import get_prompt_style
 from private_gpt.paths import models_path
 from private_gpt.settings.settings import Settings
 
+logger = logging.getLogger(__name__)
+
 
 @singleton
 class LLMComponent:
@@ -13,6 +17,8 @@ class LLMComponent:
 
     @inject
     def __init__(self, settings: Settings) -> None:
+        llm_mode = settings.llm.mode
+        logger.info("Initializing the LLM in mode=%s", llm_mode)
         match settings.llm.mode:
             case "local":
                 from llama_index.llms import LlamaCPP
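The hunks above cut off inside the match statement. The following is a minimal, self-contained sketch of the pattern they show: an injected singleton that picks and logs the LLM backend based on settings.llm.mode. The fallback to MockLLM, the LlamaCPP constructor arguments, and the untyped settings parameter are assumptions for illustration, not the commit's actual code.

# Minimal sketch of the pattern visible in the diff above; details are assumed.
import logging

from injector import inject, singleton
from llama_index.llms import MockLLM
from llama_index.llms.base import LLM

logger = logging.getLogger(__name__)


@singleton
class LLMComponent:
    llm: LLM

    @inject
    def __init__(self, settings) -> None:
        llm_mode = settings.llm.mode
        logger.info("Initializing the LLM in mode=%s", llm_mode)
        match llm_mode:
            case "local":
                # Imported lazily so llama-cpp is only required in local mode.
                from llama_index.llms import LlamaCPP

                self.llm = LlamaCPP(model_path="models/your-model.gguf")
            case _:
                self.llm = MockLLM()

Keeping the heavy backend import inside the case arm means the optional dependency is only paid for when that mode is actually configured.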