import os

import streamlit as st
from dotenv import load_dotenv
from groq import Groq  # Groq Python SDK: pip install groq

# Load GROQ_API_KEY from a local .env file into the environment.
load_dotenv()


def fetch_response(user_input):
    """Send the user's question to Groq and return the model's reply."""
    client = Groq(api_key=os.getenv("GROQ_API_KEY"))
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": (
                    "You are a helpful assistant. Take the input from the user and "
                    "try to provide as detailed a response as possible. Provide proper "
                    "examples to help the user, and mention references or citations "
                    "to make the answer more detail oriented."
                ),
            },
            {"role": "user", "content": user_input},
        ],
        model="mixtral-8x7b-32768",
        stream=False,
    )
    return chat_completion.choices[0].message.content
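

# Optional streaming variant (a sketch; stream_response is a hypothetical helper,
# not part of the original app): with stream=True the Groq client yields chunks
# whose text is in choices[0].delta.content, so this generator can be handed to
# Streamlit's st.write_stream in recent versions.
def stream_response(user_input):
    client = Groq(api_key=os.getenv("GROQ_API_KEY"))
    stream = client.chat.completions.create(
        messages=[{"role": "user", "content": user_input}],
        model="mixtral-8x7b-32768",
        stream=True,
    )
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content:  # the final chunk may carry no text
            yield content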


st.title("Fastest AI Chatbot By DL Titan")
st.write("Ask a question and get a response.")

text_input = st.text_input("Enter your question here:")
if st.button("Submit"):
    if text_input.strip():
        response = fetch_response(text_input)
        st.write(response)
    else:
        st.warning("Please enter a question before submitting.")
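
# To stream the answer instead (a sketch; assumes Streamlit >= 1.31, which
# provides st.write_stream), the Submit handler could call:
#     st.write_stream(stream_response(text_input))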