Mahmoud Amiri committed on
Commit
dc732ec
·
1 Parent(s): 6896f48

Fix inputs, add OAuth token

Browse files
Files changed (1) hide show
  1. app.py +63 -40
app.py CHANGED
@@ -1,60 +1,83 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient, OAuthToken
3
-
4
- MODEL_ID = "Bocklitz-Lab/lit2vec-tldr-bart-model"
5
-
6
 
 
def respond(
    message: str,
    history: list[dict[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    hf_token: gr.OAuthToken | None,
):
    """Stream a summary of a chemistry abstract from the HF Inference API.

    Args:
        message: The abstract text pasted by the user.
        history: Prior chat turns in Gradio "messages" format (not folded
            into the one-shot prompt; kept for ChatInterface compatibility).
        system_message: Instruction prepended to the abstract.
        max_tokens: Upper bound on newly generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.
        hf_token: OAuth token injected by Gradio's login flow, or ``None``
            when the visitor has not logged in.

    Yields:
        Generated text chunks as they stream from the endpoint.
    """
    # BUG FIX: `OAuthToken` is not exported by huggingface_hub — the object
    # Gradio injects is `gr.OAuthToken`, so annotate with that instead and
    # fall back to anonymous access when no one is logged in.
    client = InferenceClient(
        model=MODEL_ID,
        token=hf_token.token if hf_token is not None else None,
    )

    prompt = f"{system_message.strip()}\n\n{message.strip()}"

    # stream=True yields incremental text chunks rather than one final string.
    for chunk in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        yield chunk
-
34
-
35
with gr.Blocks(title="🧪 Chemistry Abstract Summariser") as demo:
    with gr.Sidebar():
        # Login supplies the OAuth token; Gradio injects it automatically
        # into any handler parameter annotated with `gr.OAuthToken`.
        login_btn = gr.LoginButton()

    chatbot = gr.ChatInterface(
        respond,
        chatbot=gr.Chatbot(type="messages"),
        textbox=gr.Textbox(
            placeholder="Paste abstract of a chemistry paper…",
            lines=8,
        ),
        additional_inputs=[
            gr.Textbox(
                value="Summarise this chemistry paper abstract:",
                label="System message",
            ),
            gr.Slider(16, 1024, value=256, step=8, label="Max new tokens"),
            gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
            # BUG FIX: the LoginButton must NOT be listed here — it carries
            # no value to feed `respond()`; Gradio fills the
            # `gr.OAuthToken`-annotated parameter on its own.
        ],
        type="messages",
    )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
 
 
59
# Entry point for local runs; Hugging Face Spaces imports `demo` directly.
if __name__ == "__main__":
    demo.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+ from typing import List, Dict
 
 
4
 
5
# Response function for the chatbot.
def respond(
    message: str,
    history: List[Dict[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    hf_token: gr.OAuthToken,
):
    """Stream a chat completion from the Hugging Face Inference API.

    Args:
        message: Newest user message.
        history: Prior turns in Gradio "messages" format
            (``{"role": ..., "content": ...}`` dicts).
        system_message: System prompt prepended to the conversation.
        max_tokens: Upper bound on generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.
        hf_token: OAuth token injected by Gradio's login flow; ``None``
            until the visitor logs in.

    Yields:
        The accumulated response text after each streamed chunk.
    """
    # BUG FIX: `hf_token` is None when the visitor has not logged in, so
    # `hf_token.token` would raise AttributeError — fall back to anonymous
    # access instead of crashing.
    client = InferenceClient(
        token=hf_token.token if hf_token is not None else None,
        model="Bocklitz-Lab/lit2vec-tldr-bart-model"
    )

    # Prepend the system prompt, then append the newest user turn.
    messages = [{"role": "system", "content": system_message}] + history
    messages.append({"role": "user", "content": message})

    response = ""

    for message_chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # Skip empty deltas (e.g. the role-only first chunk of a stream).
        if message_chunk.choices and message_chunk.choices[0].delta.content:
            token = message_chunk.choices[0].delta.content
            response += token
            yield response
 
 
39
 
40
# Build the chat interface; the extra controls are hoisted into named
# components so each setting is easy to find and tweak.
_system_prompt = gr.Textbox(
    value="You are a friendly chatbot.",
    label="System message",
    lines=1
)
_max_tokens = gr.Slider(
    minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"
)
_temperature = gr.Slider(
    minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"
)
_top_p = gr.Slider(
    minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
)

chatbot = gr.ChatInterface(
    fn=respond,
    type="messages",
    additional_inputs=[_system_prompt, _max_tokens, _temperature, _top_p],
)
73
+
74
# Page layout: a login button in a narrow column up top, with the chat
# interface rendered underneath it.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.LoginButton()
    chatbot.render()
80
 
81
# Launch when executed as a script (Spaces also picks up `demo` on import).
if __name__ == "__main__":
    demo.launch()