import os

import streamlit as st
from dotenv import load_dotenv
from groq import Groq

# Load GROQ_API_KEY from a local .env file into the environment so the
# Groq client below can read it with os.getenv().
load_dotenv()
def fetch_response(user_input):
    """Send the user's question to the Groq chat API and return the reply text."""
    client = Groq(api_key=os.getenv("GROQ_API_KEY"))
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": (
                    "You are a helpful assistant. Take the input from the user and "
                    "provide as detailed a response as possible. Provide proper "
                    "examples to help the user, and mention references or citations "
                    "to make the answer more detail oriented."
                ),
            },
            {"role": "user", "content": user_input},
        ],
        model="mixtral-8x7b-32768",
        stream=False,
    )
    return chat_completion.choices[0].message.content
# Alternative: the reply can also be streamed chunk by chunk instead of
# returned in one piece; a sketch follows.
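# A minimal streaming sketch, not wired into the UI below. It assumes the same
# Groq client and model, and a Streamlit version that provides st.write_stream;
# the name stream_response is introduced here only for illustration.
def stream_response(user_input):
    """Yield the reply chunk by chunk instead of returning it all at once."""
    client = Groq(api_key=os.getenv("GROQ_API_KEY"))
    chat_completion = client.chat.completions.create(
        messages=[
            {"role": "user", "content": user_input},
        ],
        model="mixtral-8x7b-32768",
        stream=True,
    )
    for chunk in chat_completion:
        # The final chunk may carry no text, so guard against None
        if chunk.choices[0].delta.content is not None:
            yield chunk.choices[0].delta.content

# In the UI, this could be rendered incrementally with:
#     st.write_stream(stream_response(text_input))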
st.title("Fastest AI Chatbot By DL Titan")
st.write("Ask a question and get a response.")
text_input = st.text_input("Enter your question here:")
if st.button("Submit"):
    response = fetch_response(text_input)
    st.write(response)