Spaces:
duan98
/
Sleeping

TDN-M committed on
Commit
8c6978c
verified
1 Parent(s): 15ef8d3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -59,14 +59,14 @@ supported_languages = config.languages
59
  if not "vi" in supported_languages:
60
  supported_languages.append("vi")
61
 
62
- # Load LangChain components
63
- model = AutoModelForSeq2SeqLM.from_pretrained("declare-lab/flan-alpaca-large")
64
- tokenizer = AutoTokenizer.from_pretrained("declare-lab/flan-alpaca-large")
65
  pipe = pipeline(
66
  'text2text-generation',
67
  model=model,
68
  tokenizer=tokenizer,
69
- max_length=120
70
  )
71
  local_llm = HuggingFacePipeline(pipeline=pipe)
72
  llm_chain = caption_chain.chain(llm=local_llm)
@@ -180,7 +180,7 @@ def predict(
180
  prompt = normalize_vietnamese_text(prompt)
181
 
182
  # Truncate prompt to fit within the maximum token length
183
- prompt = truncate_prompt(prompt, tokenizer)
184
 
185
  print("I: Generating new audio...")
186
  t0 = time.time()
 
59
  if not "vi" in supported_languages:
60
  supported_languages.append("vi")
61
 
62
+ # Load LangChain components với mô hình mới
63
+ model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-xl")
64
+ tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl")
65
  pipe = pipeline(
66
  'text2text-generation',
67
  model=model,
68
  tokenizer=tokenizer,
69
+ max_length=1024 # Cập nhật max_length
70
  )
71
  local_llm = HuggingFacePipeline(pipeline=pipe)
72
  llm_chain = caption_chain.chain(llm=local_llm)
 
180
  prompt = normalize_vietnamese_text(prompt)
181
 
182
  # Truncate prompt to fit within the maximum token length
183
+ prompt = truncate_prompt(prompt, tokenizer, max_length=512)
184
 
185
  print("I: Generating new audio...")
186
  t0 = time.time()