malvin noel committed on
Commit
469a509
·
1 Parent(s): 4098c08

Best prompt and model

Browse files
Files changed (1) hide show
  1. scripts/generate_scripts.py +8 -4
scripts/generate_scripts.py CHANGED
@@ -13,7 +13,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
13
 
14
  @spaces.GPU()
15
  def generate_local(prompt: str, max_new_tokens: int = 350, temperature: float = 0.7) -> str:
16
- model_id = "Qwen/Qwen2.5-0.5B"
17
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # get the device the model is on
18
 
19
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
@@ -33,9 +33,13 @@ def generate_local(prompt: str, max_new_tokens: int = 350, temperature: float =
33
 
34
  def generate_script(prompt: str, word_count: int = 60) -> str:
35
  system_prompt = (
36
- "You are a professional video scriptwriter. "
37
- f"Write a script for a short YouTube video about: {prompt.strip()}.\n"
38
- f"The video must be {word_count} words long, engaging, clear, and formatted as plain text."
 
 
 
 
39
  )
40
  return generate_local(system_prompt)
41
 
 
13
 
14
  @spaces.GPU()
15
  def generate_local(prompt: str, max_new_tokens: int = 350, temperature: float = 0.7) -> str:
16
+ model_id = "Qwen/Qwen2.5-1.5B"
17
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # get the device the model is on
18
 
19
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 
33
 
34
def generate_script(prompt: str, word_count: int = 60) -> str:
    """Generate a narration-only video script for *prompt*.

    Assembles a scriptwriting instruction prompt (spoken words only, no
    stage directions) targeting roughly *word_count* words, then delegates
    text generation to generate_local.

    Args:
        prompt: Topic of the video; surrounding whitespace is stripped.
        word_count: Requested length of the narration, in words.

    Returns:
        The model-generated script text from generate_local.
    """
    # Joined with "" because each fragment already carries its trailing space,
    # mirroring Python's implicit adjacent-literal concatenation.
    instructions = "".join(
        (
            "You are a professional scriptwriter. ",
            "Generate the *exact spoken words* for a short narration in a YouTube video. ",
            f"The topic is: {prompt.strip()}. ",
            f"The script must be exactly {word_count} words long. ",
            "Do NOT include stage directions, scene descriptions, or formatting. ",
            "Only output the spoken words, as they should be said, in plain text. ",
            "Make it clear, engaging, and natural for a voiceover.",
        )
    )
    return generate_local(instructions)
45