Update app.py
app.py CHANGED
@@ -1,64 +1,43 @@
Old version (removed lines marked with -):

 import streamlit as st
 import requests

-#
-API_URL = "https://api-inference.huggingface.co/models/
-

 def query_llm(prompt):
     payload = {"inputs": prompt}
     response = requests.post(API_URL, headers=headers, json=payload)
-    return response.json()[0]['generated_text']

-
-        "goal": "Generate electricity using a lemon as a battery.",
-        "materials": "Lemon, Copper coin, Zinc nail, Wires, LED",
-        "default_hypothesis": "The lemon will generate voltage to light up a small LED.",
-        "result": "LED glows slightly due to electron flow.",
-        "explanation": "The citric acid acts as electrolyte between the copper and zinc electrodes."
-    }
-}

 # Streamlit UI
-st.set_page_config(page_title="Science Lab Assistant",
 st.title("🧪 Science Lab Assistant")
-
-st.subheader("🔬 Describe your experiment")
-experiment_name = st.text_input("Name or short description of your experiment")
-goal = st.text_area("What is the goal of your experiment?")
-materials = st.text_area("List the materials involved")
-
-if st.button("🧠 Generate Hypothesis and Expected Result"):
-    prompt = f"""I am conducting a school science experiment.
-Experiment: {experiment_name}
-Goal: {goal}
-Materials: {materials}
-Please suggest a hypothesis and likely result with a brief scientific explanation."""
-    result = query_llm(prompt)
-    st.markdown("### 🤖 Assistant's Suggestion")
-    st.write(result)
New version (added lines marked with +):

 import streamlit as st
 import requests

+# Open-source LLM endpoint (replace if needed)
+API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-small"
+
+# If you have a Hugging Face token, include it here. If not, this model works without one.
+headers = {"Authorization": ""}

 def query_llm(prompt):
     payload = {"inputs": prompt}
     response = requests.post(API_URL, headers=headers, json=payload)

+    try:
+        output = response.json()
+        if isinstance(output, list) and 'generated_text' in output[0]:
+            return output[0]['generated_text']
+        elif isinstance(output, dict) and "error" in output:
+            return f"⚠️ API Error: {output['error']}"
+        else:
+            return "⚠️ Unexpected API response format."
+    except Exception as e:
+        return f"⚠️ Failed to parse response: {str(e)}"

 # Streamlit UI
+st.set_page_config(page_title="Science Lab Assistant", page_icon="🧪")
 st.title("🧪 Science Lab Assistant")
+st.subheader("Ask me about school science experiments!")
+
+with st.form("lab_form"):
+    experiment = st.text_input("🔍 Enter your experiment name or question:")
+    hypothesis = st.text_area("💡 What is your hypothesis?")
+    submitted = st.form_submit_button("Explain Experiment")
+
+if submitted and experiment:
+    prompt = f"Explain the experiment '{experiment}' for a school science class. The student hypothesizes: '{hypothesis}'. Provide an explanation and possible result."
+    result = query_llm(prompt)
+    st.markdown("### 🧠 AI Explanation")
+    st.write(result)
+
+st.markdown("---")
+st.markdown("📌 Example: `Vinegar and baking soda`")
+st.markdown("💡 Hypothesis: `Combining them will create bubbles.`")
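The new version leaves the Authorization header empty. A minimal sketch of how a Hugging Face access token could be supplied instead, assuming it is exported in an environment variable named HF_TOKEN (that variable name is only illustrative); the Inference API accepts the token as a Bearer value:

import os

# Illustrative variable name; falls back to an empty string if no token is set.
hf_token = os.environ.get("HF_TOKEN", "")
# The Hugging Face Inference API expects "Authorization: Bearer <token>";
# omit the header entirely when no token is available.
headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}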
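One way to sanity-check the endpoint before wiring it into the Streamlit UI is a direct POST with the same payload shape the app sends. This is only a sketch for a local Python shell, and the first call may return an error while the model is still loading:

import requests

API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-small"
# Same {"inputs": ...} payload that query_llm builds.
resp = requests.post(API_URL, json={"inputs": "Why do vinegar and baking soda produce bubbles?"})
print(resp.status_code)
# Success looks like [{"generated_text": "..."}]; a loading or failing model returns {"error": "..."}.
print(resp.json())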