# NOTE: the three lines that were here ("Spaces:" / "Sleeping" / "Sleeping")
# were Hugging Face Spaces UI status residue from a copy/paste, not code.
# Third-party UI / ML imports followed by stdlib imports.
import gradio as gr
import json
import os
from datetime import date
from transformers import pipeline

# Paths of the JSON files holding the static profile and the daily logs.
PROFILE_FILE = "about_me.json"
DAILY_FILE = "daily_status.json"
def load_json(path, default):
    """Load JSON from *path*, falling back to *default* if the file is missing.

    Args:
        path: Filesystem path of a JSON file.
        default: Value returned when the file does not exist.

    Returns:
        The parsed JSON value, or *default*.
    """
    if os.path.exists(path):
        # Explicit encoding so behavior doesn't depend on the platform locale.
        with open(path, encoding="utf-8") as f:
            return json.load(f)
    return default
# Load the static profile and the daily-log data once at startup;
# both default to an empty dict when the file isn't present yet.
profile = load_json(PROFILE_FILE, {})
daily = load_json(DAILY_FILE, {})
def build_context(profile, daily):
    """Build the system-prompt context from the profile and recent logs.

    Args:
        profile: Dict of profile fields, serialized verbatim as JSON.
        daily: Dict mapping date strings to entries with a 'log' key.

    Returns:
        A prompt string containing the profile, the 7 most recent daily
        logs, and the assistant's answering instructions.
    """
    # ISO-style date keys sort lexicographically, so a reverse string
    # sort puts the most recent days first; keep at most 7 of them.
    recent_days = sorted(daily.keys(), reverse=True)[:7]
    daily_lines = "\n".join(
        f"{d}: {daily[d].get('log', '')}" for d in recent_days
    )
    context = (
        f"Profile:\n{json.dumps(profile, indent=2)}\n"
        f"Recent daily logs:\n{daily_lines}\n"
        "You are a helpful assistant. Answer only using the provided information. "
        "If you don't know the answer, reply: 'Sheetal hasn't shared that yet!'"
    )
    return context
def get_llm():
    """Create the generation pipeline (called lazily on first question).

    Returns:
        A transformers pipeline wrapping google/flan-t5-small.
    """
    # BUG FIX: flan-t5 is an encoder-decoder (seq2seq) model, which the
    # causal "text-generation" task rejects at load time; the correct
    # pipeline task for T5-family models is "text2text-generation".
    return pipeline(
        "text2text-generation",
        model="google/flan-t5-small",  # small checkpoint -> fast cold start
        max_new_tokens=128,
        do_sample=True,
        temperature=0.7,
    )
# Lazily-initialized pipeline; loading the model is deferred to the
# first question so the app starts quickly.
llm = None


def chatbot_qa(user_q):
    """Answer a visitor's question using the profile/daily-log context.

    Args:
        user_q: The visitor's question text.

    Returns:
        The model's answer with any prompt scaffolding stripped off.
    """
    global llm
    if llm is None:
        llm = get_llm()  # first call pays the model-load cost
    context = build_context(profile, daily)
    prompt = f"System: {context}\nUser: {user_q}\nAssistant:"
    outputs = llm(prompt, max_new_tokens=128)
    # If the output echoes the prompt, keep only the text after the final
    # "Assistant:"; if it doesn't, split() returns the whole string as [-1].
    answer = outputs[0]["generated_text"].split("Assistant:")[-1].strip()
    return answer
# Gradio UI: a single tab with a question box wired to chatbot_qa.
with gr.Blocks(title="Sheetal's Personal Chatbot") as demo:
    # NOTE(review): the emoji below were mojibake in the original
    # ("πΈ", "π¬"); restored to the most likely intended characters.
    gr.Markdown("# 🌸 Sheetal's Personal Chatbot")
    gr.Markdown("Ask anything about Sheetal!")

    with gr.Tab("💬 Ask About Sheetal"):
        gr.Markdown("### 💬 Ask Anything About Sheetal")
        user_q = gr.Textbox(label="Type your question here:")
        ask_btn = gr.Button("Ask")
        answer_box = gr.Textbox(
            label="Bot answer", interactive=False, lines=2, max_lines=4
        )
        ask_btn.click(
            fn=chatbot_qa,
            inputs=user_q,
            outputs=answer_box,
        )

# Launched unconditionally: Hugging Face Spaces executes this file directly.
demo.launch()