"""Science Lab Assistant — a small Streamlit app that asks an open-source
LLM (via the Hugging Face Inference API) to explain school science
experiments based on a student's experiment name and hypothesis."""

import streamlit as st
import requests

# Open-source LLM endpoint (replace if needed)
API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-small"

# If you have a Hugging Face token, put it here. This model also works
# without one. NOTE: an empty "Authorization" header is malformed, so the
# header is only attached when a token is actually configured.
HF_TOKEN = ""
headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}


def query_llm(prompt):
    """Send *prompt* to the inference endpoint and return the generated text.

    Returns the model's ``generated_text`` on success, or a human-readable
    error string (prefixed with the app's error marker) on any failure —
    this function never raises, so the UI always has something to display.
    """
    payload = {"inputs": prompt}
    try:
        # timeout keeps a stalled endpoint from hanging the Streamlit UI.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    except requests.RequestException as e:
        # Connection errors, DNS failures, timeouts, etc.
        return f"โŒ Failed to parse response: {e}"

    try:
        output = response.json()
    except ValueError as e:
        # Body was not valid JSON.
        return f"โŒ Failed to parse response: {str(e)}"

    # Guard the list shape explicitly: an empty list or a non-dict first
    # element would otherwise raise IndexError/TypeError here.
    if (
        isinstance(output, list)
        and output
        and isinstance(output[0], dict)
        and "generated_text" in output[0]
    ):
        return output[0]["generated_text"]
    if isinstance(output, dict) and "error" in output:
        # The HF API reports problems (e.g. model loading) as {"error": ...}.
        return f"โŒ API Error: {output['error']}"
    return "โŒ Unexpected API response format."


# ---------------------------------------------------------------- Streamlit UI
st.set_page_config(page_title="Science Lab Assistant", page_icon="๐Ÿงช")
st.title("๐Ÿงช Science Lab Assistant")
st.subheader("Ask me about school science experiments!")

with st.form("lab_form"):
    experiment = st.text_input("๐Ÿ” Enter your experiment name or question:")
    hypothesis = st.text_area("๐Ÿ’ก What is your hypothesis?")
    submitted = st.form_submit_button("Explain Experiment")

if submitted and experiment:
    prompt = (
        f"Explain the experiment '{experiment}' for a school science class. "
        f"The student hypothesizes: '{hypothesis}'. "
        "Provide an explanation and possible result."
    )
    result = query_llm(prompt)
    st.markdown("### ๐Ÿง  AI Explanation")
    st.write(result)

st.markdown("---")
st.markdown("๐Ÿ“š Example: `Vinegar and baking soda`")
st.markdown("๐Ÿ’ก Hypothesis: `Combining them will create bubbles.`")