import warnings

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

warnings.simplefilter("ignore")

MODEL_NAME = "Unbabel/TowerBase-13B-v0.1"

@st.cache_resource
def load_model():
    # Cache the tokenizer and model so they are loaded once, not on every Streamlit rerun.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    if torch.cuda.is_available():
        # 4-bit quantization (bitsandbytes) keeps the 13B model within GPU memory.
        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto", load_in_4bit=True)
    else:
        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    return tokenizer, model

def main():
    tokenizer, model = load_model()
    languages = ["English", "Spanish", "Vietnamese", "French", "Portuguese"]
    st.sidebar.title("Translation App")
    st.sidebar.write("Choose source and target languages:")
    source_lang = st.sidebar.selectbox("Source Language", languages)
    target_lang = st.sidebar.selectbox("Target Language", languages)
    text = st.text_area(f"Enter text in {source_lang}", "")
    if st.button("Translate"):
        # TowerBase is a plain causal LM, so translation is prompted as "<source>: <text>\n<target>:".
        input_text = f"{source_lang}: {text}\n{target_lang}:"
        inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
        outputs = model.generate(**inputs, max_new_tokens=20)
        # Decode only the newly generated tokens so the prompt is not echoed in the output.
        new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
        translated_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
        st.write(f"Translation in {target_lang}: {translated_text}")

if __name__ == "__main__":
    main()
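To try the app locally, one option (assuming the file above is saved as app.py, the usual entry point for a Streamlit Space, and that streamlit, torch, transformers, and bitsandbytes are installed) is to run: streamlit run app.py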