Mirror of https://github.com/zylon-ai/private-gpt.git, synced 2025-12-22 17:05:41 +01:00
Working refactor. Dependency clean-up pending.
parent 12f3a39e8a
commit d0a7d991a2
20 changed files with 877 additions and 907 deletions
@@ -1,14 +1,15 @@
 from dataclasses import dataclass

 from injector import inject, singleton
-from llama_index import ServiceContext, StorageContext, VectorStoreIndex
-from llama_index.chat_engine import ContextChatEngine, SimpleChatEngine
-from llama_index.chat_engine.types import (
+from llama_index.core.chat_engine import ContextChatEngine, SimpleChatEngine
+from llama_index.core.chat_engine.types import (
     BaseChatEngine,
 )
-from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
-from llama_index.llms import ChatMessage, MessageRole
-from llama_index.types import TokenGen
+from llama_index.core.indices import VectorStoreIndex
+from llama_index.core.indices.postprocessor import MetadataReplacementPostProcessor
+from llama_index.core.llms import ChatMessage, MessageRole
+from llama_index.core.storage import StorageContext
+from llama_index.core.types import TokenGen
 from pydantic import BaseModel

 from private_gpt.components.embedding.embedding_component import EmbeddingComponent
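
Note on this hunk: it tracks the LlamaIndex 0.10 package split, in which the core APIs moved out of the monolithic llama_index namespace into llama_index.core. A minimal smoke test of the new import paths used above, as a sketch that assumes only that the llama-index-core package is installed; the sample message is illustrative:

    # Verifies the post-0.10 import paths this file now depends on.
    from llama_index.core.chat_engine import SimpleChatEngine
    from llama_index.core.indices import VectorStoreIndex
    from llama_index.core.llms import ChatMessage, MessageRole
    from llama_index.core.storage import StorageContext

    # ChatMessage is a plain data model, so constructing one needs no backend.
    print(ChatMessage(role=MessageRole.USER, content="hello"))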
@@ -75,20 +76,19 @@ class ChatService:
         embedding_component: EmbeddingComponent,
         node_store_component: NodeStoreComponent,
     ) -> None:
-        self.llm_service = llm_component
+        self.llm_component = llm_component
+        self.embedding_component = embedding_component
         self.vector_store_component = vector_store_component
         self.storage_context = StorageContext.from_defaults(
             vector_store=vector_store_component.vector_store,
             docstore=node_store_component.doc_store,
             index_store=node_store_component.index_store,
         )
-        self.service_context = ServiceContext.from_defaults(
-            llm=llm_component.llm, embed_model=embedding_component.embedding_model
-        )
         self.index = VectorStoreIndex.from_vector_store(
             vector_store_component.vector_store,
             storage_context=self.storage_context,
-            service_context=self.service_context,
+            llm=llm_component.llm,
+            embed_model=embedding_component.embedding_model,
             show_progress=True,
         )

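
With the ServiceContext removed from __init__, the LLM and embedding model are handed straight to the index. A minimal sketch of the same pattern in isolation, using the MockEmbedding shipped with llama-index-core so it runs offline; the document text and top_k value are illustrative:

    from llama_index.core.embeddings import MockEmbedding
    from llama_index.core.indices import VectorStoreIndex
    from llama_index.core.schema import Document

    # embed_model is passed directly, as in the refactored __init__ above;
    # no ServiceContext is constructed anywhere.
    index = VectorStoreIndex.from_documents(
        [Document(text="hello world")],
        embed_model=MockEmbedding(embed_dim=8),
    )
    nodes = index.as_retriever(similarity_top_k=1).retrieve("hello")
    print(nodes[0].node.get_content())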
@@ -102,10 +102,17 @@ class ChatService:
             vector_index_retriever = self.vector_store_component.get_retriever(
                 index=self.index, context_filter=context_filter
             )
+            # TODO ContextChatEngine is still not migrated by LlamaIndex to accept
+            # llm directly, so we are passing legacy ServiceContext until it is fixed.
+            from llama_index.core import ServiceContext
             return ContextChatEngine.from_defaults(
                 system_prompt=system_prompt,
                 retriever=vector_index_retriever,
-                service_context=self.service_context,
+                llm=self.llm_component.llm,  # Takes no effect at the moment
+                service_context=ServiceContext.from_defaults(
+                    llm=self.llm_component.llm,
+                    embed_model=self.embedding_component.embedding_model,
+                ),
                 node_postprocessors=[
                     MetadataReplacementPostProcessor(target_metadata_key="window"),
                 ],
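
For context on the postprocessor kept here: MetadataReplacementPostProcessor rewrites each retrieved node's content with whatever is stored under the given metadata key ("window" in this file), which is how sentence-window retrieval returns surrounding context instead of the single matched sentence. A minimal sketch of that behavior; the node text and window string are made up:

    from llama_index.core.indices.postprocessor import MetadataReplacementPostProcessor
    from llama_index.core.schema import NodeWithScore, TextNode

    # Hypothetical node: its "window" metadata holds the wider context.
    node = TextNode(
        text="the matched sentence.",
        metadata={"window": "one before. the matched sentence. one after."},
    )
    post = MetadataReplacementPostProcessor(target_metadata_key="window")
    (result,) = post.postprocess_nodes([NodeWithScore(node=node, score=1.0)])
    print(result.node.get_content())  # prints the window, not the lone sentence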
@@ -113,7 +120,7 @@ class ChatService:
         else:
             return SimpleChatEngine.from_defaults(
                 system_prompt=system_prompt,
-                service_context=self.service_context,
+                llm=self.llm_component.llm,
             )

     def stream_chat(
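
Unlike ContextChatEngine above, SimpleChatEngine already accepts the model directly, so the legacy ServiceContext drops out cleanly here. A minimal sketch of the new call shape, with llama-index-core's MockLLM standing in for llm_component.llm; the prompt is illustrative:

    from llama_index.core.chat_engine import SimpleChatEngine
    from llama_index.core.llms import MockLLM

    engine = SimpleChatEngine.from_defaults(
        system_prompt="Answer briefly.",
        llm=MockLLM(max_tokens=16),  # stand-in for the configured model
    )
    print(engine.chat("hello").response)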