tomas.helmfridsson committed
Commit 2ae94e1 · 1 Parent(s): 7eb58a8

update guis 6
app.py CHANGED
@@ -22,33 +22,71 @@ def load_vectorstore():
     vectorstore = FAISS.from_documents(all_docs, embedding)
     return vectorstore, loaded_files
 
+# Build the RAG chain for the chosen temperature
+def create_chain(temp):
+    llm_pipeline = pipeline(
+        "text-generation", model="tiiuae/falcon-rw-1b", device=-1
+    )
+    llm = HuggingFacePipeline(
+        pipeline=llm_pipeline,
+        model_kwargs={"temperature": temp, "max_new_tokens": 512},
+    )
+    return RetrievalQA.from_chain_type(
+        llm=llm, retriever=vectorstore.as_retriever()
+    )
+
+# Gradio UI
 with gr.Blocks() as demo:
-
-
+    # Temperature slider
+    temp_slider = gr.Slider(
+        label="🎛️ Temperatur (0 = exakt, 1 = kreativ)",
+        minimum=0.0,
+        maximum=1.0,
+        value=0.3,
+        step=0.05,
+    )
+    # Chat component
+    chatbot = gr.Chatbot()
+    # Text input
+    input_box = gr.Textbox(label="Din fråga")
+    send_button = gr.Button("Skicka")
+    status_text = gr.Markdown("🔄 Laddar modellen, vänta...")
 
+    # Load documents + build the initial chain
     vectorstore, loaded_files = load_vectorstore()
-
-    llm = HuggingFacePipeline(pipeline=llm_pipeline, model_kwargs={"temperature": 0.3, "max_new_tokens": 512})
-    qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vectorstore.as_retriever())
+    qa_chain = create_chain(temp_slider.value)
 
+    # Show the ready status and which PDFs were loaded
     loaded_list = "\n".join([f"- {f}" for f in loaded_files])
+    status_text.update(visible=False)
+    gr.Markdown(
+        f"✅ Klar! Du kan nu ställa frågor om dokumenten nedan:\n\n{loaded_list}"
+    )
 
-    def chat_fn(message, history):
+    def chat_fn(message, history, temp):
+        # Rebuild the chain in case the temperature changed
+        chain = create_chain(temp)
+        # Limit question length
         if len(message) > 1000:
-
+            history.append((message, f"⚠️ Din fråga är för lång ({len(message)} tecken). Försök en kortare fråga."))
+            return history
+        # Run the query
         try:
-            svar =
+            svar = chain.invoke({"query": message})
+            out = svar["result"]
         except Exception as e:
-
-
-
-    gr.Markdown(f"✅ Klar! Du kan nu ställa frågor om dokumenten nedan:\n\n{loaded_list}", elem_id="status-text")
+            out = f"Ett fel uppstod: {e}"
+        history.append((message, out))
+        return history
 
-
+    # Wire the button to the chat function
+    send_button.click(
         fn=chat_fn,
-
-        chatbot
-        description="Hej! Jag är din dokumentassistent. Ställ en fråga baserat på innehållet i dina PDF-filer."
+        inputs=[input_box, chatbot, temp_slider],
+        outputs=chatbot,
     )
 
-
+# Start the app
+if __name__ == "__main__":
+    demo.launch()
+
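A note on the new call pattern: create_chain(temp) builds a fresh text-generation pipeline and wraps it in a RetrievalQA chain, so the slider's current temperature applies to each question. A minimal sketch of exercising the factory outside the UI, assuming the imports and functions already defined in app.py (the query string here is made up):

    # Sketch only: relies on load_vectorstore() and create_chain() from app.py.
    vectorstore, loaded_files = load_vectorstore()
    chain = create_chain(0.3)       # low temperature: focused answers
    svar = chain.invoke({"query": "Vad handlar dokumenten om?"})
    print(svar["result"])           # RetrievalQA returns a dict with a "result" key

One trade-off worth noting: because create_chain() calls pipeline(...) each time, chat_fn rebuilds the tiiuae/falcon-rw-1b pipeline on every message. Caching the pipeline once and recreating only the HuggingFacePipeline wrapper per temperature would avoid that cost.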
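The UI wiring follows Gradio's standard Blocks pattern: every component listed in inputs is passed to the handler as a positional argument, in order, which is how the slider value reaches chat_fn. A self-contained sketch of the same pattern, with a stub handler in place of the RAG chain (it assumes a Gradio version that, like the committed code, represents chat history as (user, bot) tuples):

    import gradio as gr

    def chat_fn(message, history, temp):
        # The slider value arrives as a plain float, third because it is third in `inputs`.
        history.append((message, f"(temp={temp}) echo: {message}"))
        return history

    with gr.Blocks() as demo:
        temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.05, label="Temperatur")
        chatbot = gr.Chatbot()
        input_box = gr.Textbox(label="Din fråga")
        send_button = gr.Button("Skicka")
        send_button.click(fn=chat_fn, inputs=[input_box, chatbot, temp_slider], outputs=chatbot)

    demo.launch()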
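One detail that may not behave as intended: status_text.update(visible=False) runs while the Blocks layout is still being built, and in Gradio, update(...) only produces an update dictionary that takes effect when returned from an event listener, so the call is discarded at build time. A sketch of the usual alternative, hiding the banner from a load event instead:

    import gradio as gr

    with gr.Blocks() as demo:
        status_text = gr.Markdown("🔄 Laddar modellen, vänta...")
        # Visibility changes apply when gr.update(...) is returned from an event handler.
        demo.load(fn=lambda: gr.update(visible=False), inputs=None, outputs=status_text)

    demo.launch()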