lorentz committed on
Commit
053628c
·
verified ·
1 Parent(s): 5d4f956

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -83
app.py CHANGED
@@ -5,92 +5,65 @@ from langchain import FewShotPromptTemplate
5
  from langchain.prompts.example_selector import LengthBasedExampleSelector
6
  from dotenv import load_dotenv
7
 
8
- load_dotenv() # load the env-sample.txt file
 
 
 
9
 
10
- def getLLMResponse(query, age_option,tasktype_option):
11
- examples = []
12
- llm = OpenAI(temperature=.9, model="gpt-3.5-turbo-instruct")
13
-
14
-
15
- example_template = """
16
- Question: {query}
17
- Response: {answer}
18
- """
19
-
20
- example_prompt = PromptTemplate(
21
- input_variables=["query", "answer"],
22
- template=example_template
23
- )
24
-
25
-
26
- prefix = """You are a {template_ageoption}, and you are going to {template_tasktype_option} ,
27
- you give one answer for each query. it is strictly limited to 1 answer only, and the answer MUST be LESS THAN 200 words.
28
- For a tweet, you SHOULD NOT give more than 280 characters. If it is not to write for a tweet, DO NOT give a tweet suggestion in your answer.
29
- """
30
-
31
- suffix = """
32
- Question: {template_userInput}
33
- Response: """
34
-
35
- example_selector = LengthBasedExampleSelector(
36
- examples=examples,
37
- example_prompt=example_prompt,
38
- max_length = numberOfWords
39
- )
40
-
41
-
42
- new_prompt_template = FewShotPromptTemplate(
43
- example_selector=example_selector, # use example_selector instead of examples
44
- example_prompt=example_prompt,
45
- prefix=prefix,
46
- suffix=suffix,
47
- input_variables=["template_userInput","template_ageoption","template_tasktype_option"],
48
- example_separator="\n"
49
- )
50
-
51
-
52
- print(new_prompt_template.format(template_userInput=query,template_ageoption=age_option,template_tasktype_option=tasktype_option))
53
- response=llm(new_prompt_template.format(template_userInput=query,template_ageoption=age_option,template_tasktype_option=tasktype_option))
54
- print(response)
55
-
56
- return response
57
-
58
- #UI Starts here
59
 
 
60
  st.set_page_config(page_title="PitchPal: Your Friendly Copy Assistant",
61
- page_icon='💻',
62
- layout='centered',
63
- initial_sidebar_state='collapsed')
64
-
65
-
66
- st.markdown("<h1 style='text-align: center'>PitchPal</h1>", unsafe_allow_html=True)
67
- st.markdown("<h3 style='text-align: center'>Your Efficient Sales Copy Assistant</h2>", unsafe_allow_html=True)
68
- st.markdown("<p style='text-align: right'>By <a href='https://entzyeung.github.io/portfolio/index.html'>Lorentz Yeung</a></p>", unsafe_allow_html=True)
69
-
70
-
71
- #st.title("PitchPal")
72
- #st.subheader("Your Friendly Sales Copy Assistant")
73
- #st.markdown(
74
- # """
75
- # - by [Lorentz Yeung]()
76
- # """
77
- # )
78
 
79
- form_input = st.text_area('Enter the name of the product or service you want to promote: ', 'PlayStation 6', height=100)
80
- # st.write(f'You wrote {len(form_input)} characters.')
81
-
82
- tasktype_option = st.selectbox(
83
- 'Choose the type of marketing copy you want to generate: ',
84
- ('Draft a Twitter post', 'Draft a sales copy', 'Draft a product description'),key=1)
85
-
86
- age_option= st.selectbox(
87
- 'Select the age group of your intended audience: ',
88
- ('below age 18' ,'age 18-45', 'age 46-65', 'age > 65'),key=2)
89
-
90
- # numberOfWords= st.slider('Words limit', 1, 200, 25)
91
- numberOfWords = 40 # the new model doesn't support this.
92
-
93
- submit = st.button("Generate Your Sales Copy")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
 
95
  if submit:
96
- st.write(getLLMResponse(form_input,tasktype_option,age_option))
 
 
 
 
 
 
5
  from langchain.prompts.example_selector import LengthBasedExampleSelector
6
  from dotenv import load_dotenv
7
 
8
# Load environment variables (e.g. OPENAI_API_KEY) before any LLM call.
load_dotenv()  # make sure your .env file path is correct


def getLLMResponse(query, age_option, tasktype_option, max_length=200):
    """Generate a piece of marketing copy with an LLM via a few-shot prompt.

    Args:
        query: Name of the product or service entered by the user.
        age_option: Target audience age group, interpolated into the prompt prefix.
        tasktype_option: Kind of copy to produce (tweet, sales copy, ...).
        max_length: Word budget handed to the LengthBasedExampleSelector.
            Backward-compatible addition; callers passing three arguments
            behave as before.

    Returns:
        The raw completion string returned by the model.
    """
    # No curated few-shot examples yet; add {"query": ..., "answer": ...}
    # dicts here and the selector will trim them to the length budget.
    examples = []

    llm = OpenAI(temperature=0.9, model="gpt-3.5-turbo-instruct")

    # How each few-shot example is rendered inside the prompt.
    example_prompt = PromptTemplate(
        input_variables=["query", "answer"],
        template="\nQuestion: {query}\nResponse: {answer}\n",
    )

    # Persona + task constraints; placeholders are filled by format() below.
    prefix = """You are a {template_ageoption}, and you are going to {template_tasktype_option} ,
    you give one answer for each query. it is strictly limited to 1 answer only, and the answer MUST be LESS THAN 200 words.
    For a tweet, you SHOULD NOT give more than 280 characters. If it is not to write for a tweet, DO NOT give a tweet suggestion in your answer.
    """

    suffix = """
    Question: {template_userInput}
    Response: """

    # Selects as many examples as fit the word budget (previously this read
    # an undefined module-level global `numberOfWords`; now parameterized).
    example_selector = LengthBasedExampleSelector(
        examples=examples,
        example_prompt=example_prompt,
        max_length=max_length,
    )

    prompt_template = FewShotPromptTemplate(
        example_selector=example_selector,  # use example_selector instead of examples
        example_prompt=example_prompt,
        prefix=prefix,
        suffix=suffix,
        input_variables=["template_userInput", "template_ageoption", "template_tasktype_option"],
        example_separator="\n",
    )

    prompt = prompt_template.format(
        template_userInput=query,
        template_ageoption=age_option,
        template_tasktype_option=tasktype_option,
    )
    # Network call to the OpenAI completion endpoint.
    return llm(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
# Streamlit App Configuration
# NOTE(review): st.set_page_config must be the first Streamlit command the
# script executes — confirm no st.* call occurs above this line.
st.set_page_config(page_title="PitchPal: Your Friendly Copy Assistant",
                   page_icon="💻",
                   layout="wide",
                   initial_sidebar_state="collapsed")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
# Custom CSS for styling
# NOTE(review): none of these classes (.big-font, .title-font,
# .streamlit-container) appear to be referenced by the inline-styled
# markdown below — confirm they are used before relying on them.
st.markdown(
    """
    <style>
    .big-font {
        font-size:20px !important;
    }
    .title-font {
        font-size:30px !important;
        font-weight: bold;
    }
    .streamlit-container {
        margin-top: 2rem;
    }
    </style>
    """, unsafe_allow_html=True)
38
+
39
# Header Section
# Title, subtitle and author credit; alignment/colors via inline HTML,
# which requires unsafe_allow_html=True on each call.
st.markdown("<h1 style='text-align: center; color: #1144aa'>PitchPal: Your Efficient Sales Copy Assistant</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: #333'>Craft compelling sales copy with ease</h3>", unsafe_allow_html=True)
st.markdown("<p style='text-align: right; font-size:14px;'>By <a href='https://entzyeung.github.io/portfolio/index.html'>Lorentz Yeung</a></p>", unsafe_allow_html=True)
43
+
44
# User Input Section with Improved Layout
# Two-column layout: product text on the left, copy type / audience on the right.
col1, col2 = st.columns(2)

with col1:
    # Free-text product or service name; pre-filled example keeps the demo usable.
    form_input = st.text_area('Enter the product or service:', 'PlayStation 6', height=150)

with col2:
    tasktype_option = st.selectbox(
        'Marketing copy type:',
        ('Twitter post', 'Sales copy', 'Product description'),
        index=1)  # default selection: 'Sales copy'
    age_option = st.selectbox(
        'Audience age group:',
        ('Below 18', '18-45', '46-65', '> 65'),
        index=1)  # default selection: '18-45'

# Submit Button for Generating Sales Copy
submit = st.button("Generate Sales Copy")
62
 
63
# On submit: generate the copy first, then render the section header and result.
if submit:
    generated_copy = getLLMResponse(form_input, age_option, tasktype_option)
    st.markdown("## Generated Sales Copy")
    st.write(generated_copy)