tomas.helmfridsson committed on
Commit
4c1d80e
·
1 Parent(s): b944ab4

remove btn

Browse files
Files changed (1) hide show
  1. app.py +16 -28
app.py CHANGED
@@ -2,12 +2,12 @@ import gradio as gr
2
  from langchain_community.document_loaders import PyPDFLoader
3
  from langchain_community.vectorstores import FAISS
4
  from langchain_huggingface.embeddings import HuggingFaceEmbeddings
5
- from langchain_community.llms import HuggingFacePipeline
6
  from langchain.chains import RetrievalQA
7
  from transformers import pipeline
8
  import os
9
 
10
- # 1. Ladda och indexera alla PDF:er i mappen "dokument/"
11
  def load_vectorstore():
12
  all_docs = []
13
  for filename in os.listdir("document"):
@@ -19,32 +19,20 @@ def load_vectorstore():
19
  embedding = HuggingFaceEmbeddings(model_name="KBLab/sentence-bert-swedish-cased")
20
  return FAISS.from_documents(all_docs, embedding)
21
 
22
- vectorstore = load_vectorstore()
 
 
 
 
 
23
 
24
- # 2. Initiera en mindre modell
 
 
25
 
26
- def load_model():
27
- pipe = pipeline("text-generation", model="tiiuae/falcon-rw-1b")
28
- return HuggingFacePipeline(pipeline=pipe, model_kwargs={"temperature": 0.3, "max_new_tokens": 512})
 
29
 
30
- llm = load_model()
31
-
32
- # 3. Bygg QA-kedjan
33
- qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vectorstore.as_retriever())
34
-
35
- # 4. Funktion för Gradio-chat
36
- chat_history = []
37
-
38
- def chat_fn(message, history):
39
- svar = qa_chain.run(message)
40
- return svar
41
-
42
- # 5. Starta Gradio-gränssnittet
43
- chatbot = gr.ChatInterface(fn=chat_fn,
44
- title="🌟 Dokumentagent på Svenska",
45
- theme="soft",
46
- examples=["Vad handlar dokumentet om?", "Finns det något om diabetes?", "Vilken åtgärd föreslås?"],
47
- retry_btn="↻ Pröva igen",
48
- submit_btn="Ställ fråga")
49
-
50
- chatbot.launch()
 
2
  from langchain_community.document_loaders import PyPDFLoader
3
  from langchain_community.vectorstores import FAISS
4
  from langchain_huggingface.embeddings import HuggingFaceEmbeddings
5
+ from langchain_huggingface.llms import HuggingFacePipeline
6
  from langchain.chains import RetrievalQA
7
  from transformers import pipeline
8
  import os
9
 
10
+ # 1. Ladda och indexera alla PDF:er i mappen "document/"
11
  def load_vectorstore():
12
  all_docs = []
13
  for filename in os.listdir("document"):
 
19
  embedding = HuggingFaceEmbeddings(model_name="KBLab/sentence-bert-swedish-cased")
20
  return FAISS.from_documents(all_docs, embedding)
21
 
22
+ with gr.Blocks() as demo:
23
+ gr.Markdown("### ⏳ Laddar dokument och språkmodell...")
24
+ vectorstore = load_vectorstore()
25
+ llm = pipeline("text-generation", model="tiiuae/falcon-rw-1b", device=-1)
26
+ llm = HuggingFacePipeline(pipeline=llm, model_kwargs={"temperature": 0.3, "max_new_tokens": 512})
27
+ qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vectorstore.as_retriever())
28
 
29
+ def chat_fn(message, history):
30
+ svar = qa_chain.run(message)
31
+ return svar
32
 
33
+ chatbot = gr.ChatInterface(fn=chat_fn,
34
+ title="🌟 Dokumentassistent (Svenska)",
35
+ examples=["Vad handlar dokumentet om?", "Står det något om diabetes?", "Vilken åtgärd rekommenderas?"])
36
+ chatbot.render()
37
 
38
+ demo.launch()