Spaces:
Sleeping
Sleeping
Yaron Koresh
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -568,7 +568,7 @@ def summarize(
|
|
568 |
prefix = "summarize: "
|
569 |
ret = ""
|
570 |
|
571 |
-
for index in math.ceil( len(words) / 512 ):
|
572 |
|
573 |
chunk = " ".join(words[ index*512:(index+1)*512 ])
|
574 |
inputs = tokenizer.encode( prefix + chunk, return_tensors="pt", max_length=float('inf'), truncation=False)
|
@@ -722,7 +722,7 @@ def translate(txt,to_lang="en",from_lang=False):
|
|
722 |
prefix = f"translate {language_codes[from_lang]} to {language_codes[to_lang]}: "
|
723 |
words = txt.split()
|
724 |
ret = ""
|
725 |
-
for index in math.ceil( len(words) / 512 ):
|
726 |
chunk = " ".join(words[ index*512:(index+1)*512 ])
|
727 |
inputs = tokenizer.encode(prefix+chunk, return_tensors="pt", max_length=float('inf'), truncation=False)
|
728 |
gen = model.generate(chunk,input)
|
|
|
568 |
prefix = "summarize: "
|
569 |
ret = ""
|
570 |
|
571 |
+
for index in range(math.ceil( len(words) / 512 )):
|
572 |
|
573 |
chunk = " ".join(words[ index*512:(index+1)*512 ])
|
574 |
inputs = tokenizer.encode( prefix + chunk, return_tensors="pt", max_length=float('inf'), truncation=False)
|
|
|
722 |
prefix = f"translate {language_codes[from_lang]} to {language_codes[to_lang]}: "
|
723 |
words = txt.split()
|
724 |
ret = ""
|
725 |
+
for index in range(math.ceil( len(words) / 512 )):
|
726 |
chunk = " ".join(words[ index*512:(index+1)*512 ])
|
727 |
inputs = tokenizer.encode(prefix+chunk, return_tensors="pt", max_length=float('inf'), truncation=False)
|
728 |
gen = model.generate(chunk,input)
|