gregorlied committed on
Commit
a6e0ce3
·
verified ·
1 Parent(s): c88d4a0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -58
app.py CHANGED
@@ -2,68 +2,58 @@ import os
2
  import spaces
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
 
5
 
6
- """
7
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
8
- """
9
  HF_TOKEN = os.getenv("HF_TOKEN")
10
 
11
# Model served via the HF Inference API; client is shared module-wide.
model_name = "HuggingFaceH4/zephyr-7b-beta"
client = InferenceClient(model_name, api_key=HF_TOKEN)
13
-
14
@spaces.GPU(duration=60)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message* given the running history.

    Parameters
    ----------
    message : str
        The new user turn.
    history : list[tuple[str, str]]
        Prior (user, assistant) pairs; empty entries are skipped.
    system_message : str
        System prompt placed first in the conversation.
    max_tokens, temperature, top_p
        Sampling parameters forwarded to the Inference API.

    Yields
    ------
    str
        The partial response accumulated so far (Gradio renders each yield).
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Loop variable renamed from `message`, which shadowed the function
    # parameter in the original.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # The final stream chunk can carry a None delta; the original did
        # `response += token` unconditionally and crashed with
        # `TypeError: can only concatenate str ... NoneType`.
        if token is not None:
            response += token
        yield response
46
-
47
-
48
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Chat UI: `respond` receives each additional_inputs value positionally,
# in declaration order (system message, max tokens, temperature, top-p).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
if __name__ == "__main__":
    # Launch the Gradio server when run as a script.
    demo.launch()
 
2
  import spaces
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
+ from pydantic import BaseModel
6
 
 
 
 
7
  HF_TOKEN = os.getenv("HF_TOKEN")
8
 
9
# Target schema for structured extraction; pydantic turns this into the
# JSON schema sent in `response_format` below.  Deliberately documented
# with comments rather than a docstring — a class docstring would
# presumably surface in model_json_schema() as a "description" and alter
# the request payload (TODO confirm against pydantic docs).
class PaperAnalysis(BaseModel):
    title: str  # paper title, extracted verbatim
    abstract_summary: str  # model-written summary of the abstract
12
+
13
# Structured-output request payload: constrains generation to the
# PaperAnalysis JSON schema ("strict" requests exact schema adherence).
response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "PaperAnalysis",
        "schema": PaperAnalysis.model_json_schema(),
        "strict": True,
    },
}
21
+
22
# Shared Inference API client.  provider="auto" defers inference-provider
# selection to huggingface_hub; the model is named per call instead of
# being pinned here.
client = InferenceClient(
    provider="auto",
    api_key=HF_TOKEN,
)
26
 
27
@spaces.GPU(duration=60)
def extract_info(paper_text: str):
    """Extract the title and an abstract summary from raw paper text.

    Parameters
    ----------
    paper_text : str
        Pasted paper text; expected to include the title and abstract.

    Returns
    -------
    tuple[str, str]
        ``(title, abstract_summary)`` — two positional values, matching
        the two output components wired to the button below.
    """
    import json  # function-local: nothing else in the module needs it

    if not paper_text.strip():
        return "", ""

    messages = [
        {"role": "system", "content": "Extract the paper title and summarize its abstract."},
        {"role": "user", "content": paper_text},
    ]
    resp = client.chat.completions.create(
        model="meta-llama/Llama-3.2-1B-Instruct",
        messages=messages,
        response_format=response_format,
    )
    # With a json_schema response_format the message content is a JSON
    # *string*, not a dict — the original called .get() directly on it,
    # which raises AttributeError.  Parse it first.
    data = json.loads(resp.choices[0].message.content)
    # Return a 2-tuple, not a dict: the click() handler maps positional
    # return values onto outputs=[title_out, summary_out]; a plain dict
    # is not a valid multi-output return for Gradio.
    return data.get("title", ""), data.get("abstract_summary", "")
47
+
48
# --- UI ---------------------------------------------------------------
# NOTE(review): nesting reconstructed from a flattened diff — confirm the
# Column is meant to sit inside the Row (input on the left, outputs on
# the right).
with gr.Blocks() as demo:
    gr.Markdown("# 🎓 Paper Analysis Tool")
    with gr.Row():
        paper_input = gr.Textbox(label="Paper Text (include Title/Abstract)", lines=10)
        with gr.Column():
            title_out = gr.Textbox(label="Title", lines=1)
            summary_out = gr.Textbox(label="Abstract Summary", lines=5)
    analyze_btn = gr.Button("Extract Info")
    # Two output components: extract_info must supply two values, in this
    # order.
    analyze_btn.click(fn=extract_info, inputs=paper_input, outputs=[title_out, summary_out])

if __name__ == "__main__":
    # Launch the Gradio server when run as a script.
    demo.launch()