import streamlit as st

#from langchain_openai import OpenAI
#from langchain.llms import HuggingFaceEndpoint
from langchain_community.llms import HuggingFaceEndpoint

#When deployed on Hugging Face Spaces, these values have to be passed using the Variables & Secrets setting, as shown in the video :)
#import os
#os.environ["OPENAI_API_KEY"] = "sk-PLfFwPq6y24234234234FJ1Uc234234L8hVowXdt"

#Function to return the response
def load_answer(question):
    # "text-davinci-003" model is depreciated, so using the latest one https://platform.openai.com/docs/deprecations
    #llm = OpenAI(model_name="gpt-3.5-turbo-instruct",temperature=0)
    llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.2", Temperature=0.9)
    #Last week langchain has recommended to use invoke function for the below please :)
    answer=llm.invoke(question)
    return answer
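
# Alternative sketch (an illustrative assumption, not the repo's code): the Hugging Face token
# can also be passed to the endpoint explicitly instead of relying on an environment variable.
#def load_answer_with_token(question, token):
#    llm = HuggingFaceEndpoint(
#        repo_id="mistralai/Mistral-7B-Instruct-v0.2",
#        temperature=0.9,
#        huggingfacehub_api_token=token,
#    )
#    return llm.invoke(question)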


#App UI starts here
st.set_page_config(page_title="Sentiment Analysis", page_icon=":robot:")
st.header("Sentiment Analysis")

#Gets the user input from the text box
def get_text():
    input_text = st.text_input("You:", "Please write something...")
    # Note: isalpha() is False for strings that contain spaces, digits, or punctuation
    if input_text.isalpha():
        st.write(input_text, 'string')
    else:
        st.write('Please type in a string only')
    return input_text

user_input = get_text()

submit = st.button('Generate')

#If the Generate button is clicked, query the model and display the answer
if submit:
    response = load_answer(user_input)

    st.subheader("Answer:")

    st.write(response)
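
# To run the app locally (a sketch, assuming this file is saved as app.py; adjust the name as needed):
#   pip install streamlit langchain-community huggingface_hub
#   streamlit run app.py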