Codingxx committed
Commit fbfc910 · verified · 1 Parent(s): 54e8c3e

Update app.py

Files changed (1):
  1. app.py +17 -8
app.py CHANGED
@@ -1,10 +1,19 @@
  import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
  
- with gr.Blocks(fill_height=True) as demo:
-     with gr.Sidebar():
-         gr.Markdown("# Inference Provider")
-         gr.Markdown("This Space showcases the Qwen/Qwen3-30B-A3B model, served by the fireworks-ai API. Sign in with your Hugging Face account to use this API.")
-         button = gr.LoginButton("Sign in")
-     gr.load("models/Qwen/Qwen3-30B-A3B", accept_token=button, provider="fireworks-ai")
- 
- demo.launch()
+ # Load model and tokenizer
+ model_name = "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free"
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ 
+ # Function to generate text
+ def generate_text(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt")
+     outputs = model.generate(inputs["input_ids"], max_length=50)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+ 
+ # Set up Gradio Interface
+ iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
+ 
+ # Launch app
+ iface.launch()