Codingxx committed on
Commit c3f5ae6 · verified · 1 Parent(s): fbfc910

Create app.py

Files changed (1): app.py (+4 -4)
app.py CHANGED
@@ -1,19 +1,19 @@
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-# Load model and tokenizer
+# Load the model and tokenizer from Hugging Face
 model_name = "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free"
 model = AutoModelForCausalLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-# Function to generate text
+# Define a function to generate text from the model
 def generate_text(prompt):
     inputs = tokenizer(prompt, return_tensors="pt")
     outputs = model.generate(inputs["input_ids"], max_length=50)
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-# Set up Gradio Interface
+# Create the Gradio interface
 iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
 
-# Launch app
+# Launch the app
 iface.launch()
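
A note on the code itself, which this commit leaves unchanged apart from the comments: a bare from_pretrained call on a 70B checkpoint will typically exhaust memory, and generate is invoked without an attention mask and with max_length=50, which caps prompt plus output together. Below is a minimal sketch of a more robust variant, not part of this commit, assuming the accelerate package is installed (required for device_map="auto") and that half precision is acceptable.

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free"

# Assumption: shard the 70B model across available devices in half precision;
# this requires the accelerate package to be installed.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_text(prompt):
    # Move the encoded prompt to the model's (first) device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,            # forwards input_ids and attention_mask together
        max_new_tokens=50,   # bounds only the generated tokens, not prompt + output
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
iface.launch()

Passing **inputs rather than only input_ids lets generate see the attention mask, and max_new_tokens avoids truncating long prompts; both are standard transformers options rather than anything this commit introduces.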