HumamZaman committed on
Commit ca0b363 · verified · 1 Parent(s): cd496f2

Update app.py

Files changed (1)
  1. app.py +5 -24
app.py CHANGED
@@ -1,27 +1,8 @@
-import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
+import os
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 model_name = "openai-community/gpt-oss-20b"
+hf_token = os.getenv("HF_TOKEN")  # Reads from Space secret
 
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    torch_dtype=torch.float16,
-    device_map="auto"
-)
-
-def chat(message, history):
-    inputs = tokenizer(message, return_tensors="pt").to(model.device)
-    outputs = model.generate(
-        **inputs,
-        max_length=200,
-        do_sample=True,
-        temperature=0.7,
-        top_p=0.9
-    )
-    reply = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return reply
-
-iface = gr.ChatInterface(fn=chat)
-iface.launch()
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_token)
+model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=hf_token)
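
Note that recent transformers releases deprecate the use_auth_token argument in favor of token. Below is a minimal sketch of an equivalent loader using the newer keyword; it assumes an up-to-date transformers install and that the HF_TOKEN secret is configured on the Space, and is not part of the committed file.

import os

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "openai-community/gpt-oss-20b"
hf_token = os.getenv("HF_TOKEN")  # Space secret; None if unset, so public repos still load

# token supersedes the deprecated use_auth_token keyword in recent transformers versions.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)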