Manasa1 commited on
Commit
9d629bd
·
verified ·
1 Parent(s): c685a0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -7
app.py CHANGED
@@ -11,7 +11,7 @@ fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
11
  generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)
12
 
13
  # Function to dynamically truncate output while keeping it meaningful
14
- def truncate_tweet(tweet, max_length=240):
15
  # Ensure the tweet is concise, ending on a complete sentence
16
  if len(tweet) > max_length:
17
  tweet = tweet[:max_length]
@@ -53,13 +53,23 @@ def add_relevant_tags(tweet, input_question):
53
  def generate_tweet(input_question):
54
  # Format the input without "Question:" and "Answer:"
55
  prompt = input_question.strip()
56
- # Generate the output
57
- output = generator(prompt, max_length=150, num_return_sequences=1, temperature=0.7, top_p=0.9)
 
 
58
  # Extract the generated text and clean it
59
  tweet = output[0]['generated_text']
60
- tweet = re.sub(r"(Question:|Answer:)", "", tweet).strip() # Remove "Question:" and "Answer:"
61
- tweet = truncate_tweet(tweet) # Truncate to ensure it's concise
62
- tweet = add_relevant_tags(tweet, input_question) # Add relevant hashtags and emojis
 
 
 
 
 
 
 
 
63
  return tweet
64
 
65
  # Create the Gradio interface
@@ -72,4 +82,4 @@ interface = gr.Interface(
72
  )
73
 
74
  # Launch the interface
75
- interface.launch()
 
11
  generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)
12
 
13
  # Function to dynamically truncate output while keeping it meaningful
14
+ def truncate_tweet(tweet, max_length=280):
15
  # Ensure the tweet is concise, ending on a complete sentence
16
  if len(tweet) > max_length:
17
  tweet = tweet[:max_length]
 
53
def generate_tweet(input_question):
    """Generate a concise tweet-style answer for *input_question*.

    Runs the fine-tuned GPT-2 text-generation pipeline on the stripped
    question, removes prompt-echo and "Question:"/"Answer:" labels from
    the raw output, truncates it to tweet length, and decorates it with
    relevant hashtags/emojis.

    Parameters:
        input_question (str): the user's question, taken verbatim from
            the Gradio textbox.

    Returns:
        str: the cleaned, truncated, tagged tweet text.
    """
    # Format the input without "Question:" and "Answer:" prefixes.
    prompt = input_question.strip()

    # Generate the output with a higher max_length for longer responses.
    # temperature/top_p chosen for moderately creative but on-topic text.
    output = generator(prompt, max_length=300, num_return_sequences=1,
                       temperature=0.7, top_p=0.9)

    # Extract the generated text from the first (only) returned sequence.
    tweet = output[0]['generated_text']

    # Remove "Question:"/"Answer:"/"A:" labels the model may emit.
    # FIX: the original pattern ended in "|)" — a trailing empty
    # alternative that matches the empty string at every position,
    # forcing re.sub into a pointless substitution per character.
    tweet = re.sub(r"(Question:|Answer:|A:)", "", tweet).strip()

    # Drop any echo of the prompt the model prepended to its answer.
    tweet = tweet.replace(input_question, "").strip()

    # Truncate to tweet length and add relevant hashtags and emojis.
    tweet = truncate_tweet(tweet)
    tweet = add_relevant_tags(tweet, input_question)

    return tweet
74
 
75
  # Create the Gradio interface
 
82
  )
83
 
84
  # Launch the interface
85
+ interface.launch()