Spaces:
Paused
Paused
Commit
·
1296b9e
1
Parent(s):
9891731
logging
Browse files
app.py
CHANGED
@@ -1,5 +1,10 @@
|
|
1 |
import gradio as gr
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
# Load model directly from your Hugging Face repository
|
5 |
def load_model():
|
@@ -22,7 +27,9 @@ def generate_soap_note(doctor_patient_conversation):
|
|
22 |
|
23 |
|
24 |
# Decode and extract the response part
|
25 |
-
|
|
|
|
|
26 |
|
27 |
# Load model and tokenizer (this will run once when the app starts)
|
28 |
model, tokenizer = load_model()
|
|
|
1 |
import gradio as gr
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
3 |
+
from transformers.utils import logging
|
4 |
+
|
5 |
+
logging.set_verbosity_debug()
|
6 |
+
|
7 |
+
logger = logging.get_logger("transformers")
|
8 |
|
9 |
# Load model directly from your Hugging Face repository
|
10 |
def load_model():
|
|
|
27 |
|
28 |
|
29 |
# Decode and extract the response part
|
30 |
+
decoded_response = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
31 |
+
logger.debug(f"Decoded response: {decoded_response}")
|
32 |
+
return decoded_response
|
33 |
|
34 |
# Load model and tokenizer (this will run once when the app starts)
|
35 |
model, tokenizer = load_model()
|