import streamlit as st
import requests
import os
# Load Groq API key from environment variable
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
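# NOTE: assumes the key is supplied by the runtime environment (e.g. a Space secret);
# if it is unset, the Authorization header below is invalid and requests will be rejected.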
# Choose your Groq model
MODEL_NAME = "mixtral-8x7b-32768" # or "llama3-8b-8192", etc.
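# NOTE: Groq's hosted model list changes over time; if this model name is no longer
# served, swap in a currently available one from the Groq documentation.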
# Set up chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Title and description
st.title("🔧 Failure Diagnosis Bot")
st.markdown("A chatbot that helps you identify mechanical issues based on symptoms and suggests fixes and tools.")
# Input box
user_input = st.text_input("Describe your machine's problem:", key="user_input")
# Submit button
if st.button("Diagnose"):
    if user_input:
        st.session_state.messages.append({"role": "user", "content": user_input})
        # Create the chat prompt
        full_prompt = [
            {"role": "system", "content": (
                "You are an expert mechanical engineer bot that helps diagnose mechanical failures. "
                "Given a user's input about machine problems (like grinding noise, overheating, etc.), "
                "respond with the most likely cause, recommended fix, and tools needed. "
                "Keep answers clear, concise, and technically accurate."
            )},
            *st.session_state.messages
        ]
        # Send request to Groq API
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {GROQ_API_KEY}",
                "Content-Type": "application/json"
            },
            json={
                "model": MODEL_NAME,
                "messages": full_prompt,
                "temperature": 0.7
            }
        )
        if response.status_code == 200:
            reply = response.json()["choices"][0]["message"]["content"]
            st.session_state.messages.append({"role": "assistant", "content": reply})
        else:
            st.error("Error contacting Groq API.")
            st.json(response.json())
# Display chat history
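# NOTE: st.chat_message is assumed available (chat elements added in Streamlit 1.24).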
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])