import gradio as gr
from huggingface_hub import InferenceClient

# BART-based TL;DR summarisation checkpoint hosted on the Hugging Face Hub.
MODEL_ID = "Bocklitz-Lab/lit2vec-tldr-bart-model"


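# ChatInterface calls respond(message, history, *additional_inputs); a parameter
# annotated with gr.OAuthToken is filled in by Gradio itself once the user has
# logged in, rather than by an input component.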
def respond(
    message: str,
    history: list[dict[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    hf_token: gr.OAuthToken | None,
):
    """Summarise a chemistry abstract with the HF Inference API."""
    # hf_token is None when the user has not signed in via the LoginButton.
    client = InferenceClient(
        model=MODEL_ID,
        token=None if hf_token is None else hf_token.token,
    )

    prompt = f"{system_message.strip()}\n\n{message.strip()}"

    # ChatInterface treats each yielded value as the full reply so far, so the
    # streamed chunks are accumulated before being yielded.
    response = ""
    for chunk in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        response += chunk
        yield response
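
# Note: MODEL_ID is a seq2seq summarisation checkpoint, so a non-streaming
# alternative (a sketch, assuming only the final text is needed) would be the
# task-specific helper:
#
#     summary = client.summarization(message.strip())
#     yield summary.summary_text  # older huggingface_hub versions return a plain str
#
# The streaming text_generation call above is kept so ChatInterface can render
# partial output as it arrives.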


with gr.Blocks(title="🧪 Chemistry Abstract Summariser") as demo:
    with gr.Sidebar():
        # Hugging Face OAuth sign-in; the resulting token reaches respond()
        # through its gr.OAuthToken parameter, not as an input component.
        gr.LoginButton()

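    # The additional_inputs below map, in order, onto the system_message,
    # max_tokens, temperature and top_p parameters of respond().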
    chatbot = gr.ChatInterface(
        respond,
        chatbot=gr.Chatbot(type="messages"),
        textbox=gr.Textbox(
            placeholder="Paste the abstract of a chemistry paper…",
            lines=8,
        ),
        additional_inputs=[
            gr.Textbox(
                value="Summarise this chemistry paper abstract:",
                label="System message",
            ),
            gr.Slider(16, 1024, value=256, step=8, label="Max new tokens"),
            gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
        ],
        type="messages",
    )


if __name__ == "__main__":
    demo.launch()