24Sureshkumar committed
Commit 048c81d · verified · 1 parent: 099984d

Update app.py

Files changed (1):
  app.py  +29 -30
app.py CHANGED
@@ -1,57 +1,56 @@
 import streamlit as st
-from transformers import pipeline
+from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
 from diffusers import StableDiffusionPipeline
 import torch
 
-# Cache and load models only once
+# Cache models for faster loading
 @st.cache_resource
 def load_all_models():
+    # Translation model
+    translation_model = AutoModelForSeq2SeqLM.from_pretrained(
+        "ai4bharat/indictrans2-indic-en-dist-200M", trust_remote_code=True
+    )
+    translation_tokenizer = AutoTokenizer.from_pretrained(
+        "ai4bharat/indictrans2-indic-en-dist-200M", trust_remote_code=True
+    )
     translation_pipeline = pipeline(
-        "text2text-generation",
-        model="ai4bharat/indictrans2-indic-en-dist-200M",
-        tokenizer="ai4bharat/indictrans2-indic-en-dist-200M",
-        device=0 if torch.cuda.is_available() else -1,
-        trust_remote_code=True  # Required for custom tokenizer
+        "text2text-generation", model=translation_model, tokenizer=translation_tokenizer
     )
 
+    # Image generation model (Stable Diffusion)
     img_pipe = StableDiffusionPipeline.from_pretrained(
-        "runwayml/stable-diffusion-v1-5",
-        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
+        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
     )
     img_pipe = img_pipe.to("cuda" if torch.cuda.is_available() else "cpu")
-
+
     return translation_pipeline, img_pipe
 
-
 def main():
-    st.title("🧠 Tamil to English Image Generator")
+    st.title("📘 Tamil to English Translator & Image Generator")
 
-    tamil_text = st.text_area("✍️ Enter Tamil word or sentence:")
+    tamil_text = st.text_area("📝 Enter Tamil text (word or sentence)", height=100)
 
-    if st.button("Translate & Generate Image"):
-        if tamil_text.strip() == "":
-            st.warning("⚠️ Please enter some Tamil text.")
+    if st.button("🔄 Translate & Generate Image"):
+        if not tamil_text.strip():
+            st.warning("Please enter some Tamil text.")
             return
 
-        with st.spinner("⏳ Loading models..."):
+        try:
             translation_pipeline, img_pipe = load_all_models()
 
-        # Format input for IndicTrans2
-        formatted_input = f"<2ta> <2en> {tamil_text.strip()}"
+            # Prepare translation input
+            formatted_input = "<2en><|ta|>" + tamil_text.strip()
+            translated = translation_pipeline(formatted_input, max_length=256)[0]["generated_text"]
 
-        try:
-            translated = translation_pipeline(formatted_input)[0]["translation_text"]
-        except Exception as e:
-            st.error(f"❌ Translation failed: {e}")
-            return
+            st.success("✅ English Translation:")
+            st.write(translated)
 
-        st.success("✅ Translation Successful!")
-        st.markdown(f"**🗣️ English Translation:** {translated}")
-
-        with st.spinner("🎨 Generating image..."):
-            image = img_pipe(translated).images[0]
-            st.image(image, caption="🖼️ AI-Generated Image", use_column_width=True)
+            with st.spinner("🖼️ Generating image..."):
+                image = img_pipe(translated).images[0]
+                st.image(image, caption="🖼️ Generated from English text")
 
+        except Exception as e:
+            st.error(f"❌ Error: {str(e)}")
 
 if __name__ == "__main__":
     main()
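
For a quick sanity check of the updated loading path outside Streamlit, the sketch below mirrors load_all_models() and the translate-then-generate flow from this commit. It is a minimal sketch, not part of the commit: the sample Tamil sentence, the output filename, and the CPU fallback to float32 (the commit loads Stable Diffusion in float16 unconditionally) are assumptions, and IndicTrans2 may expect preprocessing beyond the raw "<2en><|ta|>" tag prefix used here.

    # sanity_check.py -- standalone sketch mirroring the updated app.py (not part of the commit)
    import torch
    from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
    from diffusers import StableDiffusionPipeline

    MODEL_ID = "ai4bharat/indictrans2-indic-en-dist-200M"  # same model id as in the commit

    # Translation pipeline, built the same way as load_all_models()
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
    translator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

    tamil_text = "ஒரு பூனை மேசையில் அமர்ந்திருக்கிறது"  # sample input (assumption)
    formatted_input = "<2en><|ta|>" + tamil_text          # tag prefix as used in the commit
    translated = translator(formatted_input, max_length=256)[0]["generated_text"]
    print("Translation:", translated)

    # Stable Diffusion, as in the commit; fall back to float32 on CPU (assumption)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    sd_pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
    ).to(device)
    image = sd_pipe(translated).images[0]
    image.save("output.png")  # hypothetical output path

If the tag-prefixed input does not translate cleanly, the IndicTrans2 model card describes a dedicated pre/post-processing step (IndicTransToolkit's IndicProcessor); adopting it would be a further change beyond this commit.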