# Streamlit app: GPT-2 "Prompt Generator" demo.
# (Scraped Hugging Face Spaces page header removed — it was not valid Python.)
import streamlit as st
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
# Load the GPT2 tokenizer and model.
# NOTE(review): these downloads/loads run at import time on every app start;
# Streamlit reruns the script on each interaction, so consider caching
# (e.g. st.cache_resource) — TODO confirm Streamlit version supports it.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
# Set the maximum length (in tokens, prompt included) of the generated output
max_length: int = 50
# Define the prompts offered to the user in the selectbox.
# IMPORTANT: this list must stay parallel (same order, same length) with
# `solutions` below — the app looks up solutions[prompts.index(option)].
prompts = [
"Difficulty sleeping: ",
"Time management: ",
"Stress management: ",
"Healthy eating: ",
"Exercise: ",
"Financial planning: ",
"Communication skills: ",
"Career development: ",
"Relationship issues: ",
"Self-improvement: "
]
# Define the canned solutions, index-aligned with `prompts` above.
solutions = [
"Try keeping a consistent sleep schedule and avoid caffeine before bedtime.",
"Use a planner or time-tracking app to prioritize tasks and stay on schedule.",
"Practice mindfulness techniques such as deep breathing or meditation.",
"Incorporate more fruits and vegetables into your diet and limit processed foods.",
"Aim for at least 30 minutes of moderate physical activity daily.",
"Create a budget and track expenses to avoid overspending.",
"Practice active listening and express yourself clearly and assertively.",
"Set clear goals and seek feedback and professional development opportunities.",
"Practice empathy and active communication with your partner or seek professional counseling.",
"Read self-help books, learn new skills or hobbies, and practice self-reflection."
]
# Define the function that generates a continuation for a prompt
def generate_prompt(prompt):
    """Return GPT-2's continuation of *prompt* (the prompt text itself is
    excluded from the return value; the caller prepends it for display).

    Parameters
    ----------
    prompt : str
        The seed text to continue.

    Returns
    -------
    str
        The newly generated text only, special tokens stripped.
    """
    # Encode the raw prompt. Do NOT append tokenizer.eos_token here:
    # <|endoftext|> is GPT-2's document separator, so generating after it
    # produces text for a *new* document, unrelated to the prompt.
    encoded_prompt = tokenizer.encode(prompt, return_tensors='pt')
    # Inference only — no gradients needed.
    with torch.no_grad():
        output = model.generate(
            encoded_prompt,
            max_length=max_length,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            # Explicit pad token silences the "pad_token_id not set" warning.
            pad_token_id=tokenizer.eos_token_id,
            # early_stopping removed: it only applies to beam search and
            # merely triggered a warning under the default greedy decoding.
        )
    # generate() echoes the input tokens; slice them off so only the new
    # text is returned (the caller displays option + continuation).
    continuation_ids = output[0][encoded_prompt.shape[-1]:]
    return tokenizer.decode(continuation_ids, skip_special_tokens=True)
# Streamlit page: let the user pick a topic, then show a generated
# prompt continuation alongside a canned solution for that topic.
def app():
    """Render the prompt-generator page."""
    st.title('Prompt Generator')
    # Topic chosen by the user from the predefined list.
    selected = st.selectbox('Select a prompt:', prompts)
    # Model-generated text for the chosen topic.
    generated = generate_prompt(selected)
    st.write('Prompt:', selected + generated)
    # `solutions` is index-aligned with `prompts`, so a positional
    # lookup retrieves the matching canned advice.
    st.write('Solution:', solutions[prompts.index(selected)])
# Run the streamlit app only when executed as a script (not on import)
if __name__ == '__main__':
    app()