Update app.py

app.py CHANGED
@@ -283,7 +283,7 @@ PROMPTS:
     inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
     outputs = model.generate(
         **inputs,
-        max_new_tokens=
+        max_new_tokens=512,
         temperature=0.3,
         do_sample=False,
         pad_token_id=tokenizer.eos_token_id
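A note on this hunk: `do_sample=False` selects greedy decoding, so the `temperature=0.3` passed alongside it has no effect (recent transformers releases warn about sampling flags that are set but unused). A minimal sketch of the two self-consistent configurations, assuming the same `model`/`tokenizer`/`inputs` objects as above:

# Greedy decoding (what this commit configures); temperature can simply be dropped:
outputs = model.generate(
    **inputs,
    max_new_tokens=512,
    do_sample=False,
    pad_token_id=tokenizer.eos_token_id,
)

# If temperature=0.3 is actually intended, sampling must be enabled:
outputs = model.generate(
    **inputs,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.3,
    pad_token_id=tokenizer.eos_token_id,
)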
@@ -474,146 +474,314 @@ def gate_ui():
     if st.button("Forgot?"):
         st.info("Contact the admin to reset the APP_PASSWORD secret.")
     return False
 
-def main():
-    if not gate_ui():
-        return
-
-    st.title("🤖 Grant Buddy: Grant-Writing Assistant")
-    …
-    if "generated_queries" not in st.session_state:
-        st.session_state.generated_queries = {}
-
-    manual_context = st.text_area("📝 Optional: Add your own context (e.g., mission, goals)", height=150)
-
-    # # retriever = init_vector_search().as_retriever(search_kwargs={"k": k_value, "score_threshold": score_threshold})
-    retriever = init_vector_search().as_retriever()
-    vectorstore = init_vector_search()
-
-    # pre_k = k_value*4 # Retrieve more chunks first
-    # context_docs = retriever.get_relevant_documents(query, k=pre_k)
-    # if topics:
-    #     context_docs = rerank_with_topics(context_docs, topics, alpha=topic_weight)
-    # context_docs = context_docs[:k_value] # Final top-k used in RAG
-    rag_chain = get_rag_chain(retriever, use_openai=USE_OPENAI, max_tokens=max_tokens)
-
-    uploaded_file = st.file_uploader("Upload PDF or TXT for extra context (optional)", type=["pdf", "txt"])
-    uploaded_text = ""
-
-    if uploaded_file:
-        with st.spinner("📄 Processing uploaded file..."):
-            if uploaded_file.name.endswith(".pdf"):
-                reader = PdfReader(uploaded_file)
-                uploaded_text = "\n".join([page.extract_text() for page in reader.pages if page.extract_text()])
-            elif uploaded_file.name.endswith(".txt"):
-                uploaded_text = uploaded_file.read().decode("utf-8")
-    …
-
-if __name__ == "__main__":
-    main()
+# ===== RETRIEVAL SETTINGS SIDEBAR =====
+def retrieval_settings():
+    st.sidebar.header("Retrieval Settings")
+    k_value = st.sidebar.slider("How many chunks to retrieve (k)", 5, 40, 20, 1)
+    min_score = st.sidebar.slider("Minimum relevance score", 0.0, 1.0, 0.0, 0.01)
+    topics_str = st.sidebar.text_input("Optional: Focus on specific topics (comma-separated)", "")
+    topic_score = st.sidebar.slider("Topic relevance score", 0.0, 1.0, 0.0, 0.01)
+
+    st.sidebar.header("Generation Settings")
+    max_tokens = st.sidebar.number_input("Max tokens in response", 100, 1500, 700, 50)
+    use_openai = st.sidebar.checkbox("Use OpenAI (Costs Tokens)", value=False)
+
+    topics = [t.strip() for t in topics_str.split(",") if t.strip()]
+    return {
+        "k": k_value,
+        "min_score": min_score,
+        "topics": topics,
+        "topic_score": topic_score,
+        "max_tokens": max_tokens,
+        "use_openai": use_openai,
+    }
+
+def parse_topics_field(val):
+    if not val:
+        return []
+    if isinstance(val, list):
+        return [str(t).strip() for t in val if str(t).strip()]
+    return [t.strip() for t in str(val).split(",") if t.strip()]
+def resolve_item_settings(item, defaults):
+    """
+    Merge per-item overrides with UI defaults.
+    item: dict from JSON
+    defaults: dict from sidebar (k, max_tokens, use_openai, topics, topic_score)
+    """
+    return {
+        "use_openai": bool(item.get("use_openai", defaults["use_openai"])),
+        "k": int(item.get("k", defaults["k"])),
+        "max_tokens": int(item.get("max_tokens", defaults["max_tokens"])),
+        # rerank controls:
+        "topics": parse_topics_field(item.get("topics", defaults.get("topics", []))),
+        "topic_score": float(item.get("topic_weight", defaults.get("topic_score", 0.0))),
+    }
+
+def run_query(query: str,
+              manual_context: str,
+              vectorstore,
+              use_openai: bool,
+              k: int = 10,
+              topic_list: list[str] | None = None,
+              topic_alpha: float = 0.2,
+              max_tokens: int = 700):
+    pre_k = max(1, k * 4)  # retrieve wide, then trim to k after the optional rerank
+    docs = vectorstore.similarity_search(query=query, k=pre_k)
+
+    if topic_list and topic_alpha > 0:
+        try:
+            docs = rerank_with_topics(docs, topic_list, alpha=topic_alpha)
+        except Exception as e:
+            st.warning(f"Topic rerank skipped: {e}")
+
+    docs = docs[:k]
+
+    rag_chain = get_rag_chain(retriever=None, use_openai=use_openai, max_tokens=max_tokens)
+    combined_manual = (manual_context or "").strip()
+    out = rag_chain.invoke({"question": query, "manual_context": combined_manual, "context_docs": docs})
+
+    return {"answer": out.get("answer", ""), "tokens": out.get("tokens", {}), "docs": docs}
+
+# ===== MAIN =====
+def main():
+    if not password_gate():
+        return
+
+    st.title("🤖 Grant Buddy – Manual / JSON Mode")
+    settings = retrieval_settings()
+    # Shared inputs used by both tabs; the batch runner below references these.
+    manual_context = st.text_area("📝 Optional: Add your own context (e.g., mission, goals)", height=150)
+    vectorstore = init_vector_search()
+
+    tab_manual, tab_batch = st.tabs(["✍️ Manual Mode", "🧩 JSON Batch Mode"])
+
+    # ---- Manual Mode ----
+    with tab_manual:
+        st.subheader("Manual Query")
+        uploaded_file = st.file_uploader("Upload PDF/TXT (optional)", type=["pdf", "txt"])
+        uploaded_text = ""
+        if uploaded_file:
+            if uploaded_file.name.endswith(".pdf"):
+                reader = PdfReader(uploaded_file)
+                uploaded_text = "\n".join(
+                    p.extract_text() for p in reader.pages if p.extract_text()
+                )
+            else:
+                uploaded_text = uploaded_file.read().decode("utf-8")
+
+        query = st.text_input("Enter your question")
+        if st.button("Run Manual Query"):
+            st.write("### Retrieval Settings Used:")
+            st.json(settings)
+            st.write("Running query:", query)
+            st.write("Uploaded context length:", len(uploaded_text))
+            # 🔹 Call your retrieval + generation pipeline here
+
+
# ---- JSON Batch Mode ----
|
576 |
+
with tab_batch:
|
577 |
+
st.subheader("Batch from JSON")
|
578 |
+
json_file = st.file_uploader("Upload JSON config", type=["json"])
|
579 |
+
if json_file:
|
580 |
+
cfg = json.load(json_file)
|
581 |
+
st.json(cfg)
|
582 |
+
|
583 |
+
if st.button("Run Batch"):
|
584 |
+
results = []
|
585 |
+
# Sidebar defaults (what you already collect)
|
586 |
+
defaults = {
|
587 |
+
"k": settings["k"],
|
588 |
+
"max_tokens": settings["max_tokens"],
|
589 |
+
"use_openai": settings["use_openai"],
|
590 |
+
"topics": settings["topics"],
|
591 |
+
"topic_score": settings["topic_score"],
|
592 |
+
}
|
593 |
+
|
594 |
+
queries = cfg.get("queries", [])
|
595 |
+
if not queries:
|
596 |
+
st.warning("No 'queries' found in JSON.")
|
597 |
+
for i, item in enumerate(queries, start=1):
|
598 |
+
q = (item.get("query") or "").strip()
|
599 |
+
if not q:
|
600 |
+
st.warning(f"Item {i} missing 'query'; skipping.")
|
601 |
+
continue
|
602 |
+
|
603 |
+
item_settings = resolve_item_settings(item, defaults)
|
604 |
+
|
605 |
+
result = run_query(
|
606 |
+
query=q,
|
607 |
+
manual_context=manual_context, # global context
|
608 |
+
vectorstore=vectorstore,
|
609 |
+
use_openai=item_settings["use_openai"],
|
610 |
+
k=item_settings["k"],
|
611 |
+
topic_list=item_settings["topics"],
|
612 |
+
topic_alpha=item_settings["topic_score"],
|
613 |
+
max_tokens=item_settings["max_tokens"],
|
614 |
+
)
|
615 |
+
|
616 |
+
st.markdown(f"## ๐งฉ Query {i}")
|
617 |
+
st.markdown(f"**Prompt:** {q}")
|
618 |
+
st.caption(
|
619 |
+
f"Settings โ use_openai={item_settings['use_openai']}, "
|
620 |
+
f"k={item_settings['k']}, max_tokens={item_settings['max_tokens']}, "
|
621 |
+
f"topic_weight={item_settings['topic_score']}, topics={item_settings['topics']}"
|
622 |
+
)
|
623 |
+
st.markdown(result["answer"])
|
624 |
+
if result["tokens"]:
|
625 |
+
t = result["tokens"]
|
626 |
+
st.caption(f"Tokens โ prompt: {t.get('prompt')}, completion: {t.get('completion')}, total: {t.get('total')}")
|
627 |
+
show_chunks(result["docs"])
|
628 |
+
|
629 |
+
results.append({
|
630 |
+
"query": q,
|
631 |
+
"settings": item_settings,
|
632 |
+
"answer": result["answer"],
|
633 |
+
"tokens": result["tokens"]
|
634 |
+
})
|
635 |
+
|
636 |
+
st.download_button(
|
637 |
+
"๐พ Download results JSON",
|
638 |
+
data=json.dumps({"results": results}, indent=2),
|
639 |
+
file_name="grantbuddy_results.json",
|
640 |
+
mime="application/json"
|
641 |
+
)
|
642 |
+
|
+
+if __name__ == "__main__":
+    main()
+# def main():
+#     if not gate_ui():
+#         return
+#     # st.set_page_config(page_title="Grant Buddy RAG", page_icon="🤖")
+#     st.title("🤖 Grant Buddy: Grant-Writing Assistant")
+#     USE_OPENAI = st.sidebar.checkbox("Use OpenAI (Costs Tokens)", value=False)
+#     st.sidebar.markdown("### Retrieval Settings")
+
+#     k_value = st.sidebar.slider("How many chunks to retrieve (k)", min_value=5, max_value=40, step=5, value=10)
+#     score_threshold = st.sidebar.slider("Minimum relevance score", min_value=0.0, max_value=1.0, step=0.05, value=0.75)
+#     topic_input = st.sidebar.text_input("Optional: Focus on specific topics (comma-separated)")
+#     topics = [t.strip() for t in topic_input.split(",") if t.strip()]
+#     topic_weight = st.sidebar.slider("Topic relevance score", min_value=0.0, max_value=1.0, step=0.05, value=0.2)
+#     st.sidebar.markdown("### Generation Settings")
+#     max_tokens = st.sidebar.number_input("Max tokens in response", min_value=100, max_value=1500, value=700, step=50)
+
+#     if "generated_queries" not in st.session_state:
+#         st.session_state.generated_queries = {}
+
+#     manual_context = st.text_area("📝 Optional: Add your own context (e.g., mission, goals)", height=150)
+
+#     # # retriever = init_vector_search().as_retriever(search_kwargs={"k": k_value, "score_threshold": score_threshold})
+#     retriever = init_vector_search().as_retriever()
+#     vectorstore = init_vector_search()
+
+#     # pre_k = k_value*4 # Retrieve more chunks first
+#     # context_docs = retriever.get_relevant_documents(query, k=pre_k)
+#     # if topics:
+#     #     context_docs = rerank_with_topics(context_docs, topics, alpha=topic_weight)
+#     # context_docs = context_docs[:k_value] # Final top-k used in RAG
+#     rag_chain = get_rag_chain(retriever, use_openai=USE_OPENAI, max_tokens=max_tokens)
+
+#     uploaded_file = st.file_uploader("Upload PDF or TXT for extra context (optional)", type=["pdf", "txt"])
+#     uploaded_text = ""
+
+#     if uploaded_file:
+#         with st.spinner("📄 Processing uploaded file..."):
+#             if uploaded_file.name.endswith(".pdf"):
+#                 reader = PdfReader(uploaded_file)
+#                 uploaded_text = "\n".join([page.extract_text() for page in reader.pages if page.extract_text()])
+#             elif uploaded_file.name.endswith(".txt"):
+#                 uploaded_text = uploaded_file.read().decode("utf-8")
+
+#     # extract qs and headers using llms
+#     questions = extract_with_llm_local(uploaded_text, use_openai=USE_OPENAI)
+
+#     # filter out irrelevant text
+#     def is_meaningful_prompt(text: str) -> bool:
+#         too_short = len(text.strip()) < 10
+#         banned_keywords = ["phone", "email", "fax", "address", "date", "contact", "website"]
+#         contains_bad_word = any(word in text.lower() for word in banned_keywords)
+#         is_just_punctuation = all(c in ":.*- " for c in text.strip())
+#         return not (too_short or contains_bad_word or is_just_punctuation)
+
+#     filtered_questions = [q for q in questions if is_meaningful_prompt(q)]
+#     with st.form("question_selection_form"):
+#         st.subheader("Choose prompts to answer:")
+#         selected_questions = []
+#         for i, q in enumerate(filtered_questions):
+#             if st.checkbox(q, key=f"q_{i}", value=True):
+#                 selected_questions.append(q)
+#         submit_button = st.form_submit_button("Submit")
+
+#     # # Multi-Select Question
+#     if 'submit_button' in locals() and submit_button:
+#         if selected_questions:
+#             with st.spinner("💡 Generating answers..."):
+#                 answers = []
+#                 for q in selected_questions:
+#                     combined_context = "\n\n".join(filter(None, [manual_context.strip(), uploaded_text.strip()]))
+#                     pre_k = k_value * 4
+#                     context_docs = retriever.get_relevant_documents(q, k=pre_k)
+#                     if topics:
+#                         context_docs = rerank_with_topics(context_docs, topics, alpha=topic_weight)
+#                     context_docs = context_docs[:k_value]
+#                     # full_query = f"{q}\n\nAdditional context:\n{uploaded_text}"
+
+#                     if q in st.session_state.generated_queries:
+#                         response = st.session_state.generated_queries[q]
+#                     else:
+#                         response = rag_chain.invoke({
+#                             "question": q,
+#                             "manual_context": combined_context,
+#                             "context_docs": context_docs
+#                         })
+#                         st.session_state.generated_queries[q] = response
+#                     answers.append({"question": q, "answer": response})
+#                 for item in answers:
+#                     st.markdown(f"### ❓ {item['question']}")
+#                     st.markdown(f"💬 {item['answer']['answer']}")
+#                     tokens = item['answer'].get("tokens", {})
+#                     if tokens:
+#                         st.markdown(f"🧮 **Token Usage:** Prompt = {tokens.get('prompt')}, "
+#                                     f"Completion = {tokens.get('completion')}, Total = {tokens.get('total')}")
+
+#         else:
+#             st.info("No prompts selected for answering.")
+
+#     # ✍️ Manual single-question input
+#     query = st.text_input("Ask a grant-related question")
+#     if st.button("Submit"):
+#         if not query:
+#             st.warning("Please enter a question.")
+#             return
+
+#         # full_query = f"{query}\n\nAdditional context:\n{uploaded_text}" if uploaded_text else query
+#         pre_k = k_value * 4
+#         context_docs = retriever.get_relevant_documents(query, k=pre_k)
+#         if topics:
+#             context_docs = rerank_with_topics(context_docs, topics, alpha=topic_weight)
+#         context_docs = context_docs[:k_value]
+#         combined_context = "\n\n".join(filter(None, [manual_context.strip(), uploaded_text.strip()]))
+#         with st.spinner("🤔 Thinking..."):
+#             # response = rag_chain.invoke(full_query)
+#             response = rag_chain.invoke({"question": query, "manual_context": combined_context, "context_docs": context_docs})
+#             st.text_area("Grant Buddy says:", value=response["answer"], height=250, disabled=True)
+#             tokens = response.get("tokens", {})
+#             if tokens:
+#                 st.markdown(f"🧮 **Token Usage:** Prompt = {tokens.get('prompt')}, "
+#                             f"Completion = {tokens.get('completion')}, Total = {tokens.get('total')}")
+
+#         with st.expander("📄 Retrieved Chunks"):
+#             # context_docs = retriever.get_relevant_documents(query)
+#             for doc in context_docs:
+#                 # st.json(doc.metadata)
+#                 st.markdown(f"**Chunk ID:** {doc.metadata.get('chunk_id', 'unknown')} | **Title:** {doc.metadata['metadata'].get('title', 'unknown')}")
+#                 st.markdown(doc.page_content[:700] + "...")
+#                 if topics:
+#                     matched_topics = set(doc.metadata['metadata'].get('topics', [])).intersection(topics)
+#                     st.markdown(f"**Matched Topics** {','.join(matched_topics)}")
+#                 st.markdown("---")
+
+# if __name__ == "__main__":
+#     main()
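For testing the new JSON Batch Mode, here is a minimal config the "Run Batch" handler would accept. The key names (`queries`, `query`, `k`, `max_tokens`, `use_openai`, `topics`, `topic_weight`) are exactly what `resolve_item_settings` and `parse_topics_field` read in the diff; the values are illustrative only:

# Hypothetical example config for the "Run Batch" tab; values are made up.
import json

cfg = {
    "queries": [
        {
            "query": "Summarize our organization's mission for a federal grant.",
            # Per-item overrides; anything omitted falls back to the sidebar defaults.
            "k": 8,
            "max_tokens": 500,
            "use_openai": False,
            "topics": ["education", "youth"],  # also accepts a comma-separated string
            "topic_weight": 0.3,               # stored as topic_score internally
        },
        {"query": "Describe the evaluation plan."},  # sidebar defaults apply
    ]
}

with open("batch_config.json", "w") as f:
    json.dump(cfg, f, indent=2)

Note the asymmetry the example encodes: the sidebar stores the blend weight under `topic_score`, but per-item overrides use the JSON key `topic_weight`.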
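`rerank_with_topics` is referenced but not defined in this diff. For readers following `run_query`'s retrieve-wide-then-trim flow (fetch `k * 4` candidates, optionally rerank, keep the top `k`), one plausible shape is sketched below; the scoring scheme (retrieval order blended with keyword topic overlap via `alpha`) is an assumption for illustration, not the app's actual implementation:

# Assumed sketch of rerank_with_topics; the real implementation is not in this commit.
def rerank_with_topics(docs, topics, alpha=0.2):
    topics_lower = [t.lower() for t in topics]

    def score(pair):
        rank, doc = pair
        base = 1.0 - rank / max(1, len(docs))  # earlier retrieval rank -> higher base score
        text = doc.page_content.lower()
        overlap = sum(t in text for t in topics_lower) / max(1, len(topics_lower))
        return (1 - alpha) * base + alpha * overlap  # blend retrieval order with topic match

    ranked = sorted(enumerate(docs), key=score, reverse=True)
    return [doc for _, doc in ranked]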
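Finally, the Manual Mode tab still ends at the "🔹 Call your retrieval + generation pipeline here" placeholder. A sketch of how that button body could reuse `run_query` and the sidebar `settings`, mirroring the batch path (it assumes the `manual_context`, `vectorstore`, `uploaded_text`, `query`, and `show_chunks` names used elsewhere in the file):

# Sketch only: a possible body for the "Run Manual Query" button.
if st.button("Run Manual Query"):
    # Same context merge the old main() used for single questions.
    combined_context = "\n\n".join(
        filter(None, [manual_context.strip(), uploaded_text.strip()])
    )
    result = run_query(
        query=query,
        manual_context=combined_context,
        vectorstore=vectorstore,
        use_openai=settings["use_openai"],
        k=settings["k"],
        topic_list=settings["topics"],
        topic_alpha=settings["topic_score"],
        max_tokens=settings["max_tokens"],
    )
    st.markdown(result["answer"])
    show_chunks(result["docs"])  # same helper the batch path uses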