# llm_3 / app.py
# (Header reconstructed from HuggingFace file-viewer chrome that was pasted
#  into the source: commit 47c8c7d, "Update app.py" by lorentz, 3.88 kB.)
import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain import FewShotPromptTemplate
from langchain.prompts.example_selector import LengthBasedExampleSelector
from dotenv import load_dotenv
def getLLMResponse(query, age_option, tasktype_option):
    """Generate one piece of marketing copy for *query* via a few-shot prompt.

    Parameters
    ----------
    query : str
        The product or service to write about.
    age_option : str
        Audience age group; substituted into the prompt prefix.
    tasktype_option : str
        The kind of copy to produce (e.g. tweet, sales copy); substituted
        into the prompt prefix.

    Returns
    -------
    str
        The raw completion text returned by the LLM.
    """
    # Upper bound (in words) on the total length of few-shot examples the
    # selector may include.  BUGFIX: this was previously the undefined
    # global `numberOfWords`, which raised NameError on every call.
    max_example_words = 200

    # No curated few-shot examples yet; the selector will simply emit none.
    examples = []

    llm = OpenAI(temperature=0.9, model="gpt-3.5-turbo-instruct")

    # Template used to render each few-shot example (when any are added).
    example_template = """
Question: {query}
Response: {answer}
"""
    example_prompt = PromptTemplate(
        input_variables=["query", "answer"],
        template=example_template,
    )

    # System-style instructions; persona and task are filled in at format time.
    prefix = """You are a {template_ageoption}, and you are going to {template_tasktype_option} ,
you give one answer for each query. it is strictly limited to 1 answer only, and the answer MUST be LESS THAN 200 words.
For a tweet, you SHOULD NOT give more than 280 characters. If it is not to write for a tweet, DO NOT give a tweet suggestion in your answer.
"""
    suffix = """
Question: {template_userInput}
Response: """

    # Trims the example set so the rendered examples stay under the word cap.
    example_selector = LengthBasedExampleSelector(
        examples=examples,
        example_prompt=example_prompt,
        max_length=max_example_words,
    )

    new_prompt_template = FewShotPromptTemplate(
        example_selector=example_selector,  # use example_selector instead of examples
        example_prompt=example_prompt,
        prefix=prefix,
        suffix=suffix,
        input_variables=["template_userInput", "template_ageoption", "template_tasktype_option"],
        example_separator="\n",
    )

    # Format the prompt once and reuse it for both logging and the LLM call.
    prompt_text = new_prompt_template.format(
        template_userInput=query,
        template_ageoption=age_option,
        template_tasktype_option=tasktype_option,
    )
    print(prompt_text)

    response = llm(prompt_text)
    print(response)
    return response
# ---------------------------------------------------------------------------
# UI starts here
# ---------------------------------------------------------------------------

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file.
load_dotenv()  # Make sure your .env file path is correct

# Streamlit App Configuration
st.set_page_config(page_title="PitchPal: Your Friendly Copy Assistant",
                   page_icon="💻",
                   layout="wide",
                   initial_sidebar_state="collapsed")

# Custom CSS for styling
st.markdown(
    """
    <style>
    .big-font {
        font-size:20px !important;
    }
    .title-font {
        font-size:30px !important;
        font-weight: bold;
    }
    .streamlit-container {
        margin-top: 2rem;
    }
    </style>
    """, unsafe_allow_html=True)

# Header Section
st.markdown("<h1 style='text-align: center; color: #1144aa'>PitchPal: Your Efficient Sales Copy Assistant</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: #333'>Craft compelling sales copy with ease</h3>", unsafe_allow_html=True)
st.markdown("<p style='text-align: right; font-size:14px;'>By <a href='https://entzyeung.github.io/portfolio/index.html'>Lorentz Yeung</a></p>", unsafe_allow_html=True)

# User Input Section with Improved Layout.
# NOTE(review): indentation of the `with` and `if` suites below was lost in
# the original paste; restored here so the widgets land in the right columns.
col1, col2 = st.columns(2)
with col1:
    form_input = st.text_area('Enter the product or service:', 'PlayStation 6', height=150)
with col2:
    tasktype_option = st.selectbox(
        'Marketing copy type:',
        ('Twitter post', 'Sales copy', 'Product description'),
        index=1)
    age_option = st.selectbox(
        'Audience age group:',
        ('Below 18', '18-45', '46-65', '> 65'),
        index=1)

# Submit Button for Generating Sales Copy
submit = st.button("Generate Sales Copy")
if submit:
    response = getLLMResponse(form_input, age_option, tasktype_option)
    st.markdown("## Generated Sales Copy")
    st.write(response)  # Display the LLM response