import streamlit as st
from transformers import pipeline
from st_chat_message import message
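# Example chat bubbles rendered at the top of the page (a user message and a reply)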
message("Hello world!", is_user=True)
message("Hi")
st.markdown("# Auto-generating Question-Answering Datasets with Domain-Specific Knowledge for Language Models in Scientific Tasks", unsafe_allow_html=True)
##########
# Transformation Algorithm
##########
st.markdown('## QA Dataset Auto Generation', unsafe_allow_html=True)
##########
# Property Extraction
##########
st.markdown('## Property Extraction', unsafe_allow_html=True)
st.markdown('### Select a model: ', unsafe_allow_html=True)
# Define the model configuration options
size_lst = ["-base", "-large"]
cased_lst = ["-cased", "-uncased"]
fpretrain_lst = ["None", "-scsmall", "-scmedium", "-sclarge"]
finetune_lst = ["-squad", "-scqa1", "-scqa2"]
# Create a dropdown menu for each option
size = st.selectbox("Choose a model size:", size_lst)
cased = st.selectbox("Distinguish between upper- and lowercase letters:", cased_lst)
fpretrain = st.selectbox("Further pretrained on a solar cell corpus:", fpretrain_lst)
finetune = st.selectbox("Finetuned on a QA dataset:", finetune_lst)
# Build the model name from the selected options
if fpretrain == "None":
    model = "".join(["ZongqianLi/bert", size, cased, finetune])
else:
    model = "".join(["ZongqianLi/bert", size, cased, fpretrain, finetune])
# Display the model the user selected
st.write(f"Your selected model: {model}")
# Load the question-answering model
pipe = pipeline("question-answering", model=model)
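# pipe({...}) returns a dict with the keys "score", "start", "end", and "answer"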
st.markdown('### Input a paper: ', unsafe_allow_html=True)
# Default property name and context
default_property = "FF"
default_context = "The referential DSSC with Pt CE was also measured under the same conditions, which yields η of 6.66% (Voc= 0.78 V, Jsc= 13.0 mA cm−2, FF = 65.9%)."
# Get the property name and context from the user
property = st.text_input("Enter the name of the property: ", value=default_property)
context = st.text_area("Enter the context: ", value=default_context, height=100)
# Add a button; when clicked, run the two-turn question answering
if st.button('Extract the property'):
    question_1 = f"What is the value of {property}?"
    if context and question_1:
        # First turn: extract the value of the property from the context
        out = pipe({
            'question': question_1,
            'context': context
        })
        value = out["answer"]
        st.write(f"First-turn question: {question_1}")
        st.write(f"First-turn answer: {value}")
        # Second turn: find the material that has the extracted value
        question_2 = f"What material has {property} of {value}?"
        out = pipe({
            'question': question_2,
            'context': context
        })
        material = out["answer"]
        st.write(f"Second-turn question: {question_2}")
        st.write(f"Second-turn answer: {material}")
    else:
        st.write("Please enter both a question and context.")