Manasa1 committed
Commit c685a0d · verified · 1 Parent(s): f807b07

Update app.py

Files changed (1):
app.py  +52 -7
app.py CHANGED
@@ -1,5 +1,6 @@
  import gradio as gr
  from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
+ import re

  # Load the fine-tuned GPT-2 model and tokenizer
  model_dir = "Manasa1/finetuned_GPT23"
@@ -9,21 +10,65 @@ fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
  # Create a text-generation pipeline
  generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)

+ # Function to dynamically truncate output while keeping it meaningful
+ def truncate_tweet(tweet, max_length=240):
+     # Ensure the tweet is concise, ending on a complete sentence
+     if len(tweet) > max_length:
+         tweet = tweet[:max_length]
+         last_period = tweet.rfind(".")
+         if last_period != -1:
+             tweet = tweet[:last_period + 1]
+     return tweet.strip()
+
+ # Function to intelligently add relevant hashtags and emojis
+ def add_relevant_tags(tweet, input_question):
+     # Pre-defined mappings of topics to hashtags and emojis
+     topic_to_hashtags = {
+         "startup": ["#Startups", "#Innovation", "#Entrepreneurship"],
+         "AI": ["#AI", "#ArtificialIntelligence", "#Tech"],
+         "technology": ["#Technology", "#Future", "#Tech"],
+         "future": ["#Future", "#Vision", "#Tech"],
+     }
+     topic_to_emojis = {
+         "startup": "🚀",
+         "AI": "🤖",
+         "technology": "💻",
+         "future": "🌟",
+     }
+
+     # Determine topic from input question (using keywords)
+     topic = None
+     for key in topic_to_hashtags.keys():
+         if key.lower() in input_question.lower():
+             topic = key
+             break
+
+     # Add relevant hashtags and emoji if a topic is detected
+     if topic:
+         hashtags = " ".join(topic_to_hashtags[topic][:2])  # Take up to 2 hashtags
+         emoji = topic_to_emojis[topic]
+         tweet = f"{tweet} {emoji} {hashtags}"
+     return tweet.strip()
+
  def generate_tweet(input_question):
-     # Format the prompt
-     prompt = f"Question: {input_question} Answer:"
+     # Format the input without "Question:" and "Answer:"
+     prompt = input_question.strip()
      # Generate the output
-     output = generator(prompt, max_length=250, num_return_sequences=1, temperature=0.9, top_p=0.9)
-     # Extract and return the generated text
-     return output[0]['generated_text']
+     output = generator(prompt, max_length=150, num_return_sequences=1, temperature=0.7, top_p=0.9)
+     # Extract the generated text and clean it
+     tweet = output[0]['generated_text']
+     tweet = re.sub(r"(Question:|Answer:)", "", tweet).strip()  # Remove "Question:" and "Answer:"
+     tweet = truncate_tweet(tweet)  # Truncate to ensure it's concise
+     tweet = add_relevant_tags(tweet, input_question)  # Add relevant hashtags and emojis
+     return tweet

  # Create the Gradio interface
  interface = gr.Interface(
      fn=generate_tweet,
-     inputs=gr.Textbox(label="Enter a prompt/question", placeholder="Write a tweet about startup."),
+     inputs=gr.Textbox(label="Enter a prompt/question", placeholder="Write a tweet about AI."),
      outputs=gr.Textbox(label="Generated Tweet"),
      title="Tweet Generator",
-     description="Generate tweets based on prompts using a fine-tuned GPT-2 model."
+     description="Generate concise, relevant tweets enriched with appropriate emojis and hashtags using a fine-tuned GPT-2 model."
  )

  # Launch the interface
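
A quick way to sanity-check the two helpers added in this commit, without downloading the model: the sketch below assumes truncate_tweet and add_relevant_tags have been copied into a scratch script (importing app.py directly would also download and load the fine-tuned GPT-2 checkpoint), and the draft text is made up for illustration.

# Hypothetical smoke test for the helpers introduced in this commit.
# Assumes truncate_tweet() and add_relevant_tags() are pasted into this script.

draft = ("AI is changing how startups build products. Teams that adopt it early "
         "ship faster and learn more from their users. The rest will be playing catch-up.")

short = truncate_tweet(draft, max_length=120)
# -> trimmed back to the last complete sentence that fits in 120 characters

tagged = add_relevant_tags(short, "Write a tweet about AI.")
# -> the keyword "AI" in the question appends the robot emoji plus "#AI #ArtificialIntelligence"

print(tagged)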