# Hugging Face Space snapshot — file size: 1,698 bytes, commit bc20a41
# (scraped page metadata: Space status "Sleeping"; line-number gutter removed)
import streamlit as st
import requests
# Open-source LLM endpoint (replace if needed)
API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-small"
# Optional Hugging Face token; this model also works anonymously.
HF_TOKEN = ""
# Only attach an Authorization header when a token is actually set —
# sending a literal empty "Authorization: " header is malformed and can
# cause the Inference API to reject the request.
headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
def query_llm(prompt):
    """Send *prompt* to the hosted LLM and return the generated text.

    Returns the model's ``generated_text`` on success, or a human-readable
    error string (prefixed with ❌) on any failure — callers display the
    return value directly, so this function never raises.
    """
    payload = {"inputs": prompt}
    try:
        # timeout prevents the Streamlit app from hanging forever if the
        # inference endpoint is slow or unreachable (original had none).
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
        output = response.json()
        # Guard against an empty list before indexing output[0]
        # (original raised IndexError on an empty-list response).
        if isinstance(output, list) and output and isinstance(output[0], dict) \
                and "generated_text" in output[0]:
            return output[0]["generated_text"]
        elif isinstance(output, dict) and "error" in output:
            # NOTE: "❌" repairs the mojibake "β" prefix in the original messages.
            return f"❌ API Error: {output['error']}"
        else:
            return "❌ Unexpected API response format."
    except requests.RequestException as e:
        # Network-level failures (DNS, refused connection, timeout) —
        # original left the POST outside the try and crashed on these.
        return f"❌ Request failed: {str(e)}"
    except Exception as e:
        return f"❌ Failed to parse response: {str(e)}"
# ---------------------------------------------------------------------------
# Streamlit UI: collect an experiment name and hypothesis, then show the
# model's explanation below the form.
# ---------------------------------------------------------------------------
st.set_page_config(page_title="Science Lab Assistant", page_icon="π§ͺ")
st.title("π§ͺ Science Lab Assistant")
st.subheader("Ask me about school science experiments!")

# Input form: submission reruns the script with submitted == True.
with st.form("lab_form"):
    experiment_name = st.text_input("π Enter your experiment name or question:")
    student_hypothesis = st.text_area("π‘ What is your hypothesis?")
    was_submitted = st.form_submit_button("Explain Experiment")

# Only query the model once the form is submitted with a non-empty experiment.
if was_submitted and experiment_name:
    llm_prompt = (
        f"Explain the experiment '{experiment_name}' for a school science class. "
        f"The student hypothesizes: '{student_hypothesis}'. "
        f"Provide an explanation and possible result."
    )
    explanation = query_llm(llm_prompt)
    st.markdown("### π§ AI Explanation")
    st.write(explanation)

# Static footer with example inputs.
st.markdown("---")
st.markdown("π Example: `Vinegar and baking soda`")
st.markdown("π‘ Hypothesis: `Combining them will create bubbles.`")