gregorlied committed on
Commit
6b4f26c
·
verified ·
1 Parent(s): e08fb99

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -19
app.py CHANGED
@@ -1,29 +1,16 @@
1
  import os
2
  import spaces
3
  import gradio as gr
 
 
 
4
  from huggingface_hub import login as hf_login
5
- from pydantic import BaseModel
6
  from vllm import LLM
 
7
 
8
  hf_login(token=os.getenv("HF_TOKEN"))
9
 
10
- class PatientRecord(BaseModel):
11
- life_style: str
12
- family_history: str
13
- social_history: str
14
- medical_surgical_history: str
15
- signs_symptoms: str
16
- comorbidities: str
17
- diagnostic_techniques_procedures: str
18
- diagnosis: str
19
- laboratory_values: str
20
- pathology: str
21
- pharmacological_therapy: str
22
- interventional_therapy: str
23
- patient_outcome_assessment: str
24
- age: str
25
- gender: str
26
-
27
  model_name = "meta-llama/Llama-3.2-1B-Instruct"
28
 
29
  model = LLM(
@@ -33,8 +20,77 @@ model = LLM(
33
  enforce_eager=True,
34
  )
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  with gr.Blocks() as demo:
37
- gr.Markdown("# 🎓 Paper Analysis Tool")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  if __name__ == "__main__":
40
  demo.launch()
 
import os

import gradio as gr
import spaces
import torch
from huggingface_hub import login as hf_login
from pydantic import BaseModel
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams
 
12
  hf_login(token=os.getenv("HF_TOKEN"))
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  model_name = "meta-llama/Llama-3.2-1B-Instruct"
15
 
16
  model = LLM(
 
20
  enforce_eager=True,
21
  )
22
 
23
class Info(BaseModel):
    # Pydantic model describing the JSON object the LLM is forced to emit.
    # NOTE(review): a name/age schema looks like a leftover demo for what is
    # otherwise a summarization app — confirm this is the intended output.
    name: str
    age: int

# Derive the JSON schema from the pydantic model and constrain vLLM's
# decoder so generations always validate against it.
json_schema = Info.model_json_schema()
guided_decoding_params = GuidedDecodingParams(json=json_schema)
sampling_params = SamplingParams(
    temperature=0.1,  # low temperature: near-deterministic output
    max_tokens=2048,
    guided_decoding=guided_decoding_params,
)
34
+
35
# System message prepended to every request in summarize().
prompt = "You are a helpful assistant."

# The tokenizer is used only to render the chat template into a plain
# prompt string for vLLM — no tokenization is performed in this file.
tokenizer = AutoTokenizer.from_pretrained(
    model_name,
    padding_side='right',
    trust_remote_code=True,
)

# Ensure a pad token exists (added only when the checkpoint lacks one).
# NOTE(review): nothing in this file pads or batches, so this and
# padding_side above may be vestigial — confirm before removing.
if tokenizer.pad_token is None:
    tokenizer.add_special_tokens({'pad_token': '<pad>'})
45
+
46
@spaces.GPU(duration=60)
def summarize(text):
    """Summarize *text* with the vLLM model.

    Args:
        text: Raw input from the UI textbox.

    Returns:
        The generated summary string, or a prompt-the-user message when
        the input is empty or whitespace-only.
    """
    if not text.strip():
        return "Please enter some text to summarize."

    chat = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": text},
    ]

    # Render the chat template to a plain string; vLLM receives raw text.
    # NOTE(review): enable_thinking is a Qwen-template kwarg; Llama
    # templates ignore it — confirm it is intentional here.
    rendered = tokenizer.apply_chat_template(
        chat,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False,
    )

    generation = model.generate([rendered], sampling_params)
    return generation[0].outputs[0].text
66
+
67
# Gradio UI: two side-by-side textboxes and a button wired to summarize().
with gr.Blocks() as demo:
    gr.Markdown("## 📝 Summarization for News, SciTLDR and Dialog Texts")

    with gr.Row():
        # Left: raw text to summarize; fixed height, no autoscroll.
        input_text = gr.Textbox(
            label="Input Text",
            autoscroll=False,
            lines=15,
            max_lines=15,
            placeholder="Paste your article or paragraph here...",
        )
        # Right: model output, with a copy-to-clipboard button.
        output_text = gr.Textbox(
            label="Summary",
            autoscroll=False,
            lines=15,
            max_lines=15,
            show_copy_button=True,
        )

    with gr.Row():
        summarize_btn = gr.Button("Summarize")
        # Show a progress indicator while the GPU-decorated fn runs.
        summarize_btn.click(
            fn=summarize,
            inputs=input_text,
            outputs=output_text,
            show_progress=True,
        )

if __name__ == "__main__":
    demo.launch()