Update app.py
app.py
@@ -59,14 +59,14 @@ supported_languages = config.languages
 if not "vi" in supported_languages:
     supported_languages.append("vi")
 
-# Load LangChain components
-model = AutoModelForSeq2SeqLM.from_pretrained("
-tokenizer = AutoTokenizer.from_pretrained("
+# Load LangChain components with the new model
+model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-xl")
+tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl")
 pipe = pipeline(
     'text2text-generation',
     model=model,
     tokenizer=tokenizer,
-    max_length=
+    max_length=1024  # update max_length
 )
 local_llm = HuggingFacePipeline(pipeline=pipe)
 llm_chain = caption_chain.chain(llm=local_llm)
@@ -180,7 +180,7 @@ def predict(
     prompt = normalize_vietnamese_text(prompt)
 
     # Truncate prompt to fit within the maximum token length
-    prompt = truncate_prompt(prompt, tokenizer)
+    prompt = truncate_prompt(prompt, tokenizer, max_length=512)
 
     print("I: Generating new audio...")
     t0 = time.time()