Manasa1 committed on
Commit
a78a40d
·
verified ·
1 Parent(s): 31c35db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -41
app.py CHANGED
@@ -1,44 +1,44 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
- from transformers import AutoModelForCausalLM, AutoTokenizer
5
-
6
- # Load model and tokenizer directly from Hugging Face Hub
7
- model = AutoModelForCausalLM.from_pretrained("Manasa1/Llama-2-7b-chat-finetune")
8
- tokenizer = AutoTokenizer.from_pretrained("Manasa1/Llama-2-7b-chat-finetune")
9
-
10
-
11
- def generate_tweet():
12
- prompt = "Write a concise, creative tweet reflecting the style and personality in the fine-tuned dataset."
13
- # Tokenize the input prompt
14
- inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=100, padding=True)
15
-
16
- # Explicitly set the pad_token_id
17
- model.config.pad_token_id = model.config.eos_token_id
18
-
19
- # Generate the tweet with the attention mask
20
- outputs = model.generate(
21
- inputs["input_ids"],
22
- attention_mask=inputs["attention_mask"], # Pass attention_mask explicitly
23
- max_length=140,
24
- num_return_sequences=1,
25
- top_p=0.8,
26
- temperature=0.6,
27
- repetition_penalty=1.2, # Penalize repetition
28
- )
29
- # Decode and return the generated tweet
30
- generated_tweet = tokenizer.decode(outputs[0], skip_special_tokens=True)
31
- return generated_tweet.strip()
32
-
33
-
34
- # Gradio Interface
35
- with gr.Blocks() as app:
36
- gr.Markdown("# AI Tweet Generator")
37
- gr.Markdown("Click the button below to generate a tweet reflecting the fine-tuned personality.")
38
- generate_button = gr.Button("Generate")
39
- output_box = gr.Textbox(label="Generated Tweet")
40
-
41
- generate_button.click(generate_tweet, inputs=None, outputs=output_box)
42
-
43
- # Launch the app locally
44
- app.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
+ # Load pre-trained model (or fine-tuned model)
5
+ model_name = "Manasa1/GPT_Finetuned_tweets" # Replace with the fine-tuned model name
6
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
7
+ model = AutoModelForCausalLM.from_pretrained(model_name)
8
+
# Function to generate tweets
def generate_tweet(input_text):
    """Generate a tweet about *input_text* with the fine-tuned causal LM.

    Parameters
    ----------
    input_text : str
        Topic or idea the tweet should be based on.

    Returns
    -------
    str
        The generated tweet with the instruction prompt stripped out.
    """
    prompt = ("You are a tech-savvy, forward-thinking individual with a deep understanding of technology, innovation, and cultural trends. "
              "Craft a tweet that reflects insightful commentary, wit, or actionable advice based on the following idea: \"{}\". "
              "Ensure the response is concise, engaging, and suitable for a diverse audience on social media. "
              "Incorporate elements of thought leadership, futuristic perspectives, and practical wisdom where appropriate.").format(input_text)

    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
    # do_sample=True is required for top_p/top_k to take effect — without it
    # generation is greedy and those arguments are silently ignored.
    # max_new_tokens bounds only the *generated* text; the previous
    # max_length=280 counted the (long) prompt tokens as well, leaving
    # little or no room for the actual tweet.
    # pad_token_id is set explicitly because GPT-style tokenizers define
    # no pad token, which otherwise triggers a warning on every call.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=70,  # ~280 characters, one tweet
        num_return_sequences=1,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        pad_token_id=tokenizer.eos_token_id,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Extract the tweet text (exclude prompt if included)
    return generated_text.replace(prompt, "").strip()
# Gradio interface
def main():
    """Assemble the Gradio Blocks UI and return it (launching is the caller's job)."""
    with gr.Blocks() as demo:
        gr.Markdown("""
        # Tweet Generator
        Enter a topic or idea, and the AI will craft a tweet inspired by innovative, philosophical, and tech-savvy thought leadership.
        """)

        with gr.Row():
            topic_box = gr.Textbox(label="Enter your idea or topic:")
            result_box = gr.Textbox(label="Generated Tweet:", interactive=False)

        # Wire the button to the generator: one input textbox in, one
        # read-only textbox out.
        gr.Button("Generate Tweet").click(
            generate_tweet,
            inputs=[topic_box],
            outputs=[result_box],
        )

    return demo
# Run Gradio app
if __name__ == "__main__":
    # Build the interface and start the local Gradio server.
    main().launch()