Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2,17 +2,18 @@ import gradio as gr
|
|
2 |
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline
|
3 |
|
4 |
# Replace 'username/your_model_name' with your Hugging Face model name
|
5 |
-
model_dir = "Manasa1/your_model_name"
|
6 |
fine_tuned_model = GPT2LMHeadModel.from_pretrained(model_dir)
|
7 |
fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
|
8 |
|
9 |
-
|
10 |
# Create a text-generation pipeline
|
11 |
generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)
|
12 |
|
13 |
# Function to generate tweets
|
14 |
def generate_tweet(prompt):
|
15 |
-
|
|
|
|
|
16 |
output = generator(
|
17 |
input_prompt,
|
18 |
max_length=150, # Limit the total length of the generated text
|
@@ -21,6 +22,7 @@ def generate_tweet(prompt):
|
|
21 |
top_p=0.9, # Use nucleus sampling
|
22 |
pad_token_id=fine_tuned_tokenizer.eos_token_id, # Avoid padding issues
|
23 |
)
|
|
|
24 |
# Extract the generated text and remove the input prompt from the output
|
25 |
generated_tweet = output[0]['generated_text'].replace(input_prompt, "").strip()
|
26 |
return generated_tweet
|
@@ -35,4 +37,5 @@ interface = gr.Interface(
|
|
35 |
)
|
36 |
|
37 |
# Launch the app
|
38 |
-
interface.launch()
|
|
|
|
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline

# Hugging Face Hub path of the fine-tuned GPT-2 model.
# Replace 'username/your_model_name' with your Hugging Face model name
model_dir = "Manasa1/your_model_name"  # Make sure to use your actual model path

# Load the fine-tuned weights together with their matching tokenizer.
fine_tuned_model = GPT2LMHeadModel.from_pretrained(model_dir)
fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)

# Bundle model + tokenizer into a ready-to-call text-generation pipeline.
generator = pipeline(
    'text-generation',
    model=fine_tuned_model,
    tokenizer=fine_tuned_tokenizer,
)
|
11 |
|
12 |
# Function to generate tweets
|
13 |
def generate_tweet(prompt):
|
14 |
+
# Updated input prompt to encourage creativity and engagement
|
15 |
+
input_prompt = f"Write a creative and engaging tweet about {prompt}. Keep it concise and interesting, and include a call to action or a question for followers to engage with."
|
16 |
+
|
17 |
output = generator(
|
18 |
input_prompt,
|
19 |
max_length=150, # Limit the total length of the generated text
|
|
|
22 |
top_p=0.9, # Use nucleus sampling
|
23 |
pad_token_id=fine_tuned_tokenizer.eos_token_id, # Avoid padding issues
|
24 |
)
|
25 |
+
|
26 |
# Extract the generated text and remove the input prompt from the output
|
27 |
generated_tweet = output[0]['generated_text'].replace(input_prompt, "").strip()
|
28 |
return generated_tweet
|
|
|
37 |
)
|
38 |
|
# Launch the app
# Starts the Gradio web server and serves the interface defined above.
interface.launch()
|