# LexGuardian / app.py — Streamlit "Science Lab Assistant" demo
# Hugging Face Space by sunbal7 (commit 8ca2f9f, ~1.7 kB)
import streamlit as st
import requests
# Hosted open-source LLM endpoint (swap the model path here if needed).
API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-small"

# Request headers. This public model works without authentication; if you have
# a Hugging Face token, set {"Authorization": "Bearer hf_..."}. An empty dict
# (rather than an empty Authorization value) avoids sending a malformed
# auth header, which some endpoints reject with 401.
headers = {}
def query_llm(prompt):
    """Send *prompt* to the hosted LLM and return the generated text.

    Returns the model's `generated_text` string on success, or a
    human-readable error string (prefixed with ❌) on any failure —
    callers can always `st.write` the result directly.
    """
    payload = {"inputs": prompt}
    try:
        # timeout keeps the Streamlit app from hanging forever on a stalled
        # request; the post itself must be inside the try so network errors
        # (DNS failure, timeout, connection refused) don't crash the UI.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
        output = response.json()
        # A successful generation arrives as a non-empty list of dicts;
        # check non-emptiness before indexing output[0].
        if isinstance(output, list) and output and 'generated_text' in output[0]:
            return output[0]['generated_text']
        elif isinstance(output, dict) and "error" in output:
            # The API reports problems (model loading, rate limit) as a dict.
            return f"❌ API Error: {output['error']}"
        else:
            return "❌ Unexpected API response format."
    except requests.RequestException as e:
        return f"❌ Request failed: {str(e)}"
    except Exception as e:
        return f"❌ Failed to parse response: {str(e)}"
# ---------------------------------------------------------------- Streamlit UI
st.set_page_config(page_title="Science Lab Assistant", page_icon="πŸ§ͺ")
st.title("πŸ§ͺ Science Lab Assistant")
st.subheader("Ask me about school science experiments!")

# Form batches the inputs so the app only reruns on submit.
with st.form("lab_form"):
    experiment = st.text_input("πŸ” Enter your experiment name or question:")
    hypothesis = st.text_area("πŸ’‘ What is your hypothesis?")
    submitted = st.form_submit_button("Explain Experiment")

# Only query the model when the user submitted a non-empty experiment name.
if submitted and experiment:
    prompt = (
        f"Explain the experiment '{experiment}' for a school science class. "
        f"The student hypothesizes: '{hypothesis}'. "
        "Provide an explanation and possible result."
    )
    # Spinner gives feedback during the (potentially slow) remote LLM call.
    with st.spinner("Generating explanation..."):
        result = query_llm(prompt)
    st.markdown("### 🧠 AI Explanation")
    st.write(result)

st.markdown("---")
st.markdown("πŸ“š Example: `Vinegar and baking soda`")
st.markdown("πŸ’‘ Hypothesis: `Combining them will create bubbles.`")