mirror of
https://github.com/zylon-ai/private-gpt.git
synced 2025-12-22 07:40:12 +01:00
End-to-end working version
This commit is contained in:
parent
51dae80058
commit
55338b8f6e
6 changed files with 943 additions and 0 deletions
25
privateGPT.py
Normal file
25
privateGPT.py
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
from gpt4all_j import GPT4All_J
|
||||
from langchain.chains import RetrievalQA
|
||||
from langchain.embeddings import LlamaCppEmbeddings
|
||||
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
|
||||
from langchain.vectorstores import Chroma
|
||||
|
||||
def main():
    """Run an interactive retrieval-augmented QA loop over a local vectorstore.

    Loads LlamaCpp embeddings and the persisted Chroma index from ``./db``,
    wires them to a local GPT4All-J model through a "stuff" RetrievalQA chain,
    then answers queries read from stdin until the user types "exit" (or
    closes stdin).

    Side effects: reads model files from ``./models``, reads the Chroma DB
    from ``./db``, streams generated tokens to stdout, and loops on stdin.
    """
    # Load the stored vectorstore (assumes the index was built beforehand
    # into ./db with the same embedding model -- TODO confirm ingest step).
    llama = LlamaCppEmbeddings(model_path="./models/ggml-model-q4_0.bin")
    persist_directory = 'db'
    db = Chroma(persist_directory=persist_directory, embedding_function=llama)
    retriever = db.as_retriever()

    # Prepare the LLM; the streaming handler prints tokens to stdout as
    # they are generated, so qa.run()'s return value need not be printed.
    callbacks = [StreamingStdOutCallbackHandler()]
    llm = GPT4All_J(model='./models/ggml-gpt4all-j-v1.3-groovy.bin', callbacks=callbacks, verbose=False)
    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)

    # Interactive questions and answers
    while True:
        try:
            query = input("Enter a query: ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of dying with a
            # traceback (input() raises on closed stdin or interrupt).
            break
        if query == "exit":
            break
        if not query.strip():
            # Don't run a blank query through the chain; just re-prompt.
            continue
        qa.run(query)
# Script entry point: start the interactive QA loop only when this file is
# executed directly, not when it is imported as a module.
if __name__ == "__main__":
    main()
Loading…
Add table
Add a link
Reference in a new issue