Manasa1 commited on
Commit
22a0b97
·
verified ·
1 Parent(s): 0d64cc4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -23
app.py CHANGED
@@ -7,35 +7,36 @@ import json
7
  from transformers import AutoModelForCausalLM, AutoTokenizer
8
 
9
# Hugging Face Hub repo ID of the model to serve.
# Replace 'username/your_model_name' with your Hugging Face model name
model_name = "Manasa1/your_model_name"
# Load weights and tokenizer once at import time so every request reuses them.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
13
 
 
 
 
 
 
14
def generate_tweet(prompt):
    """Generate a single tweet continuation for *prompt*.

    Returns only the newly generated text (the echoed prompt is
    stripped), truncated to Twitter's 280-character limit.
    """
    # Tokenize the input prompt.
    inputs = tokenizer(prompt, return_tensors="pt")

    # Sample one continuation. NOTE: max_length counts *tokens*
    # (prompt included), not characters — the original comment
    # claimed a 280-character cap, which is applied after decoding
    # instead.
    outputs = model.generate(
        inputs["input_ids"],
        max_length=280,
        num_return_sequences=1,  # Number of tweets to generate
        top_k=50,                # Sample only from the top-50 tokens
        top_p=0.95,              # Nucleus (cumulative-probability) sampling
        temperature=0.7,         # Adjust creativity
        do_sample=True,          # Enable sampling
    )

    # model.generate echoes the input ids at the front of the output;
    # decode only the newly generated tokens so the prompt is not
    # repeated in the returned tweet, then enforce the character cap.
    generated_ids = outputs[0][inputs["input_ids"].shape[1]:]
    tweet = tokenizer.decode(generated_ids, skip_special_tokens=True)
    return tweet[:280]
32
 
 
33
# Wire the generator into a minimal one-text-box-in / one-text-box-out web UI.
interface = gr.Interface(
    title="AI Tweet Generator",
    description="Enter a topic or a few words, and the AI will generate a creative tweet!",
    fn=generate_tweet,   # Callback invoked with the textbox contents
    inputs="text",       # Shorthand for a single text input component
    outputs="text",      # Shorthand for a single text output component
)
40
 
41
  # Launch the app
 
7
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
8
 
9
# Replace 'username/your_model_name' with your Hugging Face model name
model_dir = "Manasa1/your_model_name"
# Use the Auto* classes that are actually imported at the top of the
# file: they resolve to the right concrete classes (e.g.
# GPT2LMHeadModel / GPT2Tokenizer) from the checkpoint's config. This
# fixes the NameError caused by the explicit GPT2* names, which were
# never imported.
fine_tuned_model = AutoModelForCausalLM.from_pretrained(model_dir)
fine_tuned_tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Create a text-generation pipeline once; it is shared by all requests.
generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)
17
+
18
# Function to generate tweets
def generate_tweet(prompt):
    """Generate one short tweet for *prompt* via the shared pipeline.

    Returns only the newly generated text — the formatted prompt
    prefix is removed before returning.
    """
    input_prompt = f"{prompt}\n\nTweet:"  # Format input for clarity
    output = generator(
        input_prompt,
        max_length=50,        # Total length in *tokens*, prompt included
        num_return_sequences=1,
        temperature=0.7,      # Control creativity
        top_p=0.9,            # Use nucleus sampling
        pad_token_id=fine_tuned_tokenizer.eos_token_id,  # Avoid padding issues
    )

    # Strip the prompt prefix from the generated text. Slicing off the
    # known prefix is correct where str.replace was not: replace would
    # also delete any later occurrence of the prompt text inside the
    # generated continuation.
    text = output[0]['generated_text']
    if text.startswith(input_prompt):
        text = text[len(input_prompt):]
    return text.strip()
 
32
 
33
# Gradio Interface. The gr.inputs / gr.outputs namespaces were
# deprecated in Gradio 3 and removed in Gradio 4; components are now
# top-level (gr.Textbox), so the previous code raised AttributeError.
interface = gr.Interface(
    fn=generate_tweet,
    inputs=gr.Textbox(label="Prompt", placeholder="Enter a topic for the tweet (e.g., AI, technology)"),
    outputs=gr.Textbox(label="Generated Tweet"),
    title="AI Tweet Generator",
    description="Enter a topic or phrase, and the AI will generate a creative tweet. Powered by a fine-tuned GPT-2 model."
)
41
 
42
  # Launch the app