burman-ai committed on
Commit f4572b6 · verified · 1 Parent(s): b406397

Update app.py

Files changed (1):
app.py +16 -29
app.py CHANGED
@@ -1,40 +1,27 @@
-import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
 # Load tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("Simbolo-Servicio/Myanmarsar-GPT")
-model = AutoModelForCausalLM.from_pretrained("Simbolo-Servicio/Myanmarsar-GPT")
+tokenizer = AutoTokenizer.from_pretrained("simbolo-ai/Myanmarsar-GPT")
+model = AutoModelForCausalLM.from_pretrained("simbolo-ai/Myanmarsar-GPT")
 
-# Use GPU if available
+# Move model to GPU if available
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 
-# Define generation function
-def generate_text(prompt, max_length=100, temperature=0.7):
-    input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
-    output = model.generate(
-        input_ids,
-        max_length=max_length,
-        do_sample=True,
-        temperature=temperature,
-        top_k=50,
-        top_p=0.95
-    )
-    result = tokenizer.decode(output[0], skip_special_tokens=True)
-    return result
+# Input text
+input_text = "Marketing"
+input_ids = tokenizer.encode(input_text, return_tensors='pt').to(device)
 
-# Create Gradio interface
-iface = gr.Interface(
-    fn=generate_text,
-    inputs=[
-        gr.Textbox(lines=2, label="Myanmar Prompt"),
-        gr.Slider(20, 300, step=10, label="Max Length", value=100),
-        gr.Slider(0.1, 1.5, step=0.1, label="Temperature", value=0.7)
-    ],
-    outputs="text",
-    title="Myanmarsar-GPT Text Generator",
-    description="Type Burmese text and let the model generate a continuation."
+# Generate output
+output = model.generate(
+    input_ids,
+    max_length=256,
+    do_sample=True,
+    temperature=0.7,
+    top_k=50,
+    top_p=0.95
 )
 
-iface.launch()
+# Decode and print
+print(tokenizer.decode(output[0], skip_special_tokens=True))
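
Note for anyone adapting the updated app.py: the sketch below is not part of this commit. It wraps the same sampling settings in a reusable function and uses max_new_tokens, which bounds only the newly generated tokens, instead of max_length, which also counts the prompt tokens. The helper name generate_continuation is hypothetical.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Same checkpoint as the committed script
tokenizer = AutoTokenizer.from_pretrained("simbolo-ai/Myanmarsar-GPT")
model = AutoModelForCausalLM.from_pretrained("simbolo-ai/Myanmarsar-GPT")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()  # disable dropout for inference

def generate_continuation(prompt: str, max_new_tokens: int = 128) -> str:
    # Encode the prompt and move the token ids to the model's device
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
    with torch.no_grad():  # inference only, no gradient bookkeeping
        output = model.generate(
            input_ids,
            max_new_tokens=max_new_tokens,  # bounds only the generated tokens
            do_sample=True,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(generate_continuation("Marketing"))

model.eval() and torch.no_grad() do not change the commit's behavior; they are standard inference hygiene that avoids gradient tracking and dropout noise.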