Manasa1 committed on
Commit
6f13b9a
·
verified ยท
1 Parent(s): 7062268

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -85
app.py CHANGED
@@ -1,90 +1,78 @@
1
  import gradio as gr
2
- from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline
 
3
 
4
  # Load the fine-tuned model and tokenizer
5
- model_dir = "Manasa1/finetuned_GPT2w"
6
- fine_tuned_model = GPT2LMHeadModel.from_pretrained(model_dir)
7
- fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
8
-
9
- # Define the generator pipeline
10
- generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)
11
-
12
- # Function to intelligently add relevant hashtags and emojis
13
- def add_relevant_tags(tweet, input_question):
14
- # Pre-defined mappings of topics to hashtags and emojis
15
- topic_to_hashtags = {
16
- "startup": ["#Startups", "#Innovation", "#Entrepreneurship"],
17
- "AI": ["#AI", "#ArtificialIntelligence", "#Tech"],
18
- "technology": ["#Technology", "#Future", "#Tech"],
19
- "future": ["#Future", "#Vision", "#Tech"],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  }
21
- topic_to_emojis = {
22
- "startup": "๐Ÿš€",
23
- "AI": "๐Ÿค–",
24
- "technology": "๐Ÿ’ป",
25
- "future": "๐ŸŒŸ",
26
- }
27
-
28
- # Determine topic from input question (using keywords)
29
- topic = None
30
- for key in topic_to_hashtags.keys():
31
- if key.lower() in input_question.lower():
32
- topic = key
33
- break
34
-
35
- # Add relevant hashtags and emoji if a topic is detected
36
- if topic:
37
- hashtags = " ".join(topic_to_hashtags[topic][:2]) # Take up to 2 hashtags
38
- emoji = topic_to_emojis[topic]
39
- tweet = f"{tweet} {emoji} {hashtags}"
40
- else:
41
- # If no topic is detected, don't add emojis/hashtags
42
- tweet = f"{tweet} #NoTopic"
43
-
44
- return tweet.strip()
45
-
46
- # Function to generate tweet
47
- def generate_tweet(input_question):
48
- # Formulate the prompt with clear guidance for tweet generation
49
- input_text = f"Write a very short, engaging tweet with emojis and relevant hashtags about {input_question}. Keep it between 200 and 280 characters. Provide only the tweet."
50
-
51
- # Generate the output using the pipeline
52
- output = generator(input_text, max_length=280, num_return_sequences=1, temperature=0.7, top_p=0.9)
53
-
54
- # Extract the generated text
55
- tweet = output[0]['generated_text']
56
-
57
- # Extract the tweet part by splitting based on the prompt
58
- tweet = tweet.split(f"Write a very short, engaging tweet with emojis and relevant hashtags about {input_question}")[-1].strip()
59
-
60
- # Ensure the tweet is between 200 and 280 characters
61
- tweet_length = len(tweet)
62
- if tweet_length > 280:
63
- tweet = tweet[:280]
64
- last_period = tweet.rfind(".")
65
- if last_period != -1:
66
- tweet = tweet[:last_period + 1]
67
- elif tweet_length < 200:
68
- tweet = tweet.ljust(200) # Ensure a minimum length of 200 characters
69
-
70
- # Add relevant hashtags and emojis
71
- tweet = add_relevant_tags(tweet, input_question)
72
-
73
- return tweet
74
-
75
- # Gradio interface
76
- def gradio_interface(input_question):
77
- tweet = generate_tweet(input_question)
78
- return tweet
79
-
80
- # Create the Gradio app
81
- iface = gr.Interface(
82
- fn=gradio_interface,
83
- inputs="text",
84
- outputs="text",
85
- title="AI Tweet Generator",
86
- description="Enter a topic, and the model will generate a tweet with relevant hashtags and emojis."
87
- )
88
 
89
- # Launch the app
90
- iface.launch()
 
1
  import gradio as gr
2
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
3
+ import re
4
 
5
# Load the fine-tuned model and tokenizer once at import time so every
# request served by the app reuses the same weights.
try:
    model = GPT2LMHeadModel.from_pretrained("Manasa1/finetuned_GPTb")  # fine-tuned GPT-2 weights
    tokenizer = GPT2Tokenizer.from_pretrained("Manasa1/finetuned_GPTb")  # matching tokenizer
    # GPT-2 ships without a pad token; reuse EOS so padded batches work.
    tokenizer.pad_token = tokenizer.eos_token
except Exception as e:  # broad on purpose: any load failure is fatal for this app
    # BUG FIX: the original did `print(...); exit()`, which terminates with
    # status 0 and hides the failure from supervisors. SystemExit prints the
    # message to stderr and exits non-zero, preserving the original cause.
    raise SystemExit(f"Error loading model or tokenizer: {e}") from e
13
+
14
# Function to generate an answer to a question
def generate_answer(question):
    """Return the model's continuation for *question*, or an error string.

    The question is framed as a "Q: ... A:" prompt; the echoed prompt prefix
    is stripped from the decoded output so only the answer text is returned.
    Failures are reported as "Error: ..." strings rather than raised, so the
    Gradio UI can display them.
    """
    if not question.strip():
        return "Error: Question cannot be empty."
    try:
        prompt = f"Q: {question} A:"
        inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=1024)
        prompt_length = len(inputs["input_ids"][0])
        # Keep the total sequence inside GPT-2's 1024-token context window.
        # BUG FIX: for a maximal (1024-token) prompt the original computed
        # max_new_tokens == 0, which makes generate() fail; always allow >= 1.
        max_new_tokens = max(1, 1024 - prompt_length)
        output = model.generate(
            inputs["input_ids"],
            # Pass the attention mask explicitly so padding is unambiguous
            # (silences the "attention mask is not set" warning).
            attention_mask=inputs["attention_mask"],
            max_new_tokens=max_new_tokens,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            top_p=0.9,
            top_k=50,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        answer = tokenizer.decode(output[0], skip_special_tokens=True)
        # Strip the echoed prompt; if nothing useful remains, say so instead
        # of returning an empty string (the original could return "").
        answer = answer[len(prompt):].strip()
        return answer if answer else "Error: Could not generate a meaningful response."
    except Exception as e:
        # Surface generation failures to the UI instead of crashing the app.
        return f"Error during generation: {e}"
38
+
39
# Function to add relevant hashtags and emojis
def add_hashtags_and_emojis(tweet):
    """Return *tweet* with hashtags/emojis appended for each keyword it mentions.

    Matching is a case-insensitive substring search over the tweet text.
    Duplicates are removed while preserving first-seen order. If no keyword
    matches, the tweet is returned unchanged (whitespace-stripped).
    """
    # NOTE: the emoji literals in the previous revision were mojibake
    # (UTF-8 bytes decoded with the wrong codec); restored to real emojis.
    hashtags_and_emojis = {
        "AI": ["#AI", "🤖"],
        "machine learning": ["#MachineLearning", "📊"],
        "data": ["#DataScience", "📈"],
        "technology": ["#Tech", "💻"],
        "innovation": ["#Innovation", "✨"],
        "coding": ["#Coding", "👨‍💻"],
        "future": ["#Future", "🔮"],
        "startup": ["#Startup", "🚀"],
        "sustainability": ["#Sustainability", "🌱"],
    }
    tweet_lower = tweet.lower()
    added_items = []
    for keyword, items in hashtags_and_emojis.items():
        # BUG FIX: the tweet is lowercased, so mixed-case keys like "AI"
        # could never match; lowercase the keyword before comparing.
        if keyword.lower() in tweet_lower:
            added_items.extend(items)
    # De-duplicate while preserving insertion order.
    added_items = list(dict.fromkeys(added_items))
    if not added_items:
        # BUG FIX: the original unconditionally appended a separator, leaving
        # a trailing space when nothing matched.
        return tweet.strip()
    return tweet.strip() + " " + " ".join(added_items)
59
+
60
# Function to handle Gradio input and output
def generate_tweet_with_hashtags(question):
    """Produce a tweet for *question* and decorate it with hashtags/emojis.

    Thin composition used as the Gradio click handler: model generation
    first, then keyword-based hashtag/emoji enrichment.
    """
    return add_hashtags_and_emojis(generate_answer(question))
65
+
66
# Gradio UI: one text box in, one read-only text box out, a single button
# wiring the handler between them.
with gr.Blocks() as app:
    gr.Markdown("# AI Tweet Generator with Hashtags and Emojis")
    gr.Markdown("Enter a question or topic, and the app will generate a tweet and enhance it with relevant hashtags and emojis!")
    topic_box = gr.Textbox(label="Enter your question or topic:")
    tweet_box = gr.Textbox(label="Generated Tweet with Hashtags and Emojis:", interactive=False)
    run_button = gr.Button("Generate Tweet")
    run_button.click(generate_tweet_with_hashtags, inputs=[topic_box], outputs=[tweet_box])

# Start the web server only when executed as a script, not on import.
if __name__ == "__main__":
    app.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78