Mahmoud Amiri committed on
Commit
fad1354
·
1 Parent(s): f052f35

Fix inputs, add OAuth token

Browse files
Files changed (1) hide show
  1. app.py +43 -36
app.py CHANGED
@@ -1,53 +1,60 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
3
 
4
 
5
  def respond(
6
- message,
7
  history: list[dict[str, str]],
8
- system_message,
9
- max_tokens,
10
- temperature,
11
- top_p,
12
- hf_token: gr.OAuthToken,
13
  ):
 
14
  client = InferenceClient(
15
- token=hf_token.token,
16
- model="Bocklitz-Lab/lit2vec-tldr-bart-model"
17
  )
18
 
19
- full_input = f"{system_message.strip()}\n\n{message.strip()}"
20
 
21
- response = client.text_generation(
22
- full_input,
 
23
  max_new_tokens=max_tokens,
24
  temperature=temperature,
25
  top_p=top_p,
26
- stream=False
27
- )
28
-
29
- yield response
30
 
31
 
32
- chatbot = gr.ChatInterface(
33
- respond,
34
- chatbot=gr.Chatbot(type="messages"),
35
- textbox=gr.Textbox(placeholder="Paste abstract of a chemistry paper...", container=False, scale=7),
36
- additional_inputs=[
37
- gr.Textbox(value="Summarize this chemistry paper abstract:", label="System message"),
38
- gr.Slider(minimum=16, maximum=1024, value=256, step=8, label="Max new tokens"),
39
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
40
- gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
41
- ],
42
- type="messages",
43
- )
44
-
45
- demo = gr.Blocks()
46
-
47
- with demo:
48
  with gr.Sidebar():
49
- gr.LoginButton()
50
- chatbot.render()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
- # 👇 This MUST be called at the module level for Hugging Face Spaces to work
53
- demo.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient, OAuthToken
3
+
4
+ MODEL_ID = "Bocklitz-Lab/lit2vec-tldr-bart-model"
5
 
6
 
7
def respond(
    message: str,
    history: list[dict[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    hf_token: gr.OAuthToken | None = None,
):
    """Summarise a chemistry abstract with the HF Inference API, streaming.

    Args:
        message: The abstract pasted by the user.
        history: Prior chat turns (required by ChatInterface; not sent to the
            model — the model is a single-shot summariser).
        system_message: Instruction prepended to the abstract.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        hf_token: OAuth token injected by Gradio via the ``gr.OAuthToken``
            annotation when the user is logged in; ``None`` for anonymous
            users (the request then falls back to unauthenticated access).

    Yields:
        The summary accumulated so far, so the chat UI streams it.
    """
    client = InferenceClient(
        model=MODEL_ID,
        token=hf_token.token if hf_token is not None else None,
    )

    prompt = f"{system_message.strip()}\n\n{message.strip()}"

    # Each value yielded from a ChatInterface generator REPLACES the displayed
    # message, so we must accumulate chunks and yield the growing text —
    # yielding raw chunks would show only the latest token fragment.
    partial = ""
    for chunk in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        partial += chunk
        yield partial
 
33
 
34
 
35
with gr.Blocks(title="🧪 Chemistry Abstract Summariser") as demo:
    with gr.Sidebar():
        # Gradio delivers the resulting OAuth token to `respond` through its
        # `gr.OAuthToken`-annotated parameter; the LoginButton itself is not
        # an input component and must NOT be listed in `additional_inputs`
        # (doing so maps the button positionally onto `hf_token`).
        gr.LoginButton()

    gr.ChatInterface(
        respond,
        chatbot=gr.Chatbot(type="messages"),
        textbox=gr.Textbox(
            placeholder="Paste abstract of a chemistry paper…",
            lines=8,
        ),
        additional_inputs=[
            gr.Textbox(
                value="Summarise this chemistry paper abstract:",
                label="System message",
            ),
            gr.Slider(16, 1024, value=256, step=8, label="Max new tokens"),
            gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
        ],
        type="messages",
    )

if __name__ == "__main__":
    demo.launch()  # Hugging Face Spaces runs app.py as __main__