Manasa1 committed on
Commit
d29b4df
·
verified ·
1 Parent(s): 0572819

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -8,16 +8,16 @@ model = AutoModelForCausalLM.from_pretrained("Manasa1/gpt-finetuned-tweets")
8
  tokenizer = AutoTokenizer.from_pretrained("Manasa1/gpt-finetuned-tweets")
9
 
10
 
11
- # Define the function to generate tweets
12
def generate_tweet():
    """Generate one tweet in the voice of the fine-tuned dataset.

    Uses the module-level ``model`` and ``tokenizer`` (loaded from
    "Manasa1/gpt-finetuned-tweets" at import time).

    Returns:
        str: the decoded, whitespace-stripped generated text (includes the
        prompt prefix, since the full sequence is decoded).
    """
    prompt = "Generate a tweet that reflects the personality in the fine-tuned dataset:"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    outputs = model.generate(
        inputs["input_ids"],
        # Forward the attention mask so generate() doesn't have to guess it.
        attention_mask=inputs["attention_mask"],
        # NOTE: max_length counts tokens, not characters — this does not
        # guarantee a <=280-character tweet.
        max_length=280,
        num_return_sequences=1,
        # top_p/temperature only take effect when sampling is enabled;
        # without do_sample=True, generation is greedy and they are ignored.
        do_sample=True,
        top_p=0.9,
        temperature=0.7,
    )
    generated_tweet = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_tweet.strip()
 
8
  tokenizer = AutoTokenizer.from_pretrained("Manasa1/gpt-finetuned-tweets")
9
 
10
 
 
11
def generate_tweet():
    """Generate one concise tweet in the style of the fine-tuned dataset.

    Uses the module-level ``model`` and ``tokenizer`` (loaded from
    "Manasa1/gpt-finetuned-tweets" at import time).

    Returns:
        str: the decoded, whitespace-stripped generated text (includes the
        prompt prefix, since the full sequence is decoded).
    """
    prompt = "Write a concise, creative tweet reflecting the style and personality in the fine-tuned dataset."
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=100)
    outputs = model.generate(
        inputs["input_ids"],
        # Forward the attention mask so generate() doesn't have to guess it.
        attention_mask=inputs["attention_mask"],
        # NOTE: max_length counts tokens, not characters — this does not
        # guarantee a <=140-character tweet.
        max_length=140,
        num_return_sequences=1,
        # top_p/temperature only take effect when sampling is enabled;
        # without do_sample=True, generation is greedy and they are ignored.
        do_sample=True,
        top_p=0.8,
        temperature=0.6,
        repetition_penalty=1.2,  # Penalizes repetitive tokens
    )
    generated_tweet = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_tweet.strip()