dexay committed on
Commit
a0ed7fc
·
1 Parent(s): 371b679

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -8,11 +8,12 @@ st.write("This tool lets you extract relation triples concerning interactions be
8
  st.write("It is the result of an end of studies project within ESI school and dedicated to biomedical researchers looking to extract precise information about the subject without digging into long publications.")
9
 
10
  form = st.form(key='my-form')
11
- x = form.text_input('Enter your text')
12
  submit = form.form_submit_button('Submit')
13
 
14
  if submit and len(x) != 0:
15
  #model.to("cpu")
 
16
  tokenizer = AutoTokenizer.from_pretrained("dmis-lab/biobert-large-cased-v1.1", truncation = True, padding=True, model_max_length=512,)
17
  model_checkpoint = BertForTokenClassification.from_pretrained("dexay/Ner2HgF", )
18
 
@@ -20,7 +21,7 @@ if submit and len(x) != 0:
20
  model_re = AutoModelForSequenceClassification.from_pretrained("dexay/reDs3others", )
21
  token_classifier = pipeline("token-classification", tokenizer = tokenizer,model=model_checkpoint, )
22
 
23
- st.text("Knowledge extraction is in progress ...")
24
 
25
  if x[-1] not in ".?:":
26
  x += "."
 
8
  st.write("It is the result of an end of studies project within ESI school and dedicated to biomedical researchers looking to extract precise information about the subject without digging into long publications.")
9
 
10
  form = st.form(key='my-form')
11
+ x = form.text_area('Enter text', height=275)
12
  submit = form.form_submit_button('Submit')
13
 
14
  if submit and len(x) != 0:
15
  #model.to("cpu")
16
+ st.text("Execution is in progress ...")
17
  tokenizer = AutoTokenizer.from_pretrained("dmis-lab/biobert-large-cased-v1.1", truncation = True, padding=True, model_max_length=512,)
18
  model_checkpoint = BertForTokenClassification.from_pretrained("dexay/Ner2HgF", )
19
 
 
21
  model_re = AutoModelForSequenceClassification.from_pretrained("dexay/reDs3others", )
22
  token_classifier = pipeline("token-classification", tokenizer = tokenizer,model=model_checkpoint, )
23
 
24
+
25
 
26
  if x[-1] not in ".?:":
27
  x += "."