import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
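# Dependencies: gradio, transformers, torch, and accelerate (needed for device_map="auto").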

model_id = "aaditya/Llama3-OpenBioLLM-8B"

# Load the tokenizer and weights once at startup; device_map="auto" places the
# model on the available GPU(s) and float16 halves its memory footprint.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.float16,
)

chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chatbot(message, history=None):
    # Wrap the user message in an instruction-style prompt, generate a reply,
    # and strip the echoed prompt from the pipeline output before returning.
    prompt = f"[INST] {message} [/INST]"
    response = chat_pipeline(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
    )[0]["generated_text"]
    return response.replace(prompt, "").strip()
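
# Alternative (not wired into the UI below): OpenBioLLM-8B is a Llama-3
# derivative, so its tokenizer ships a chat template. This sketch builds the
# prompt with tokenizer.apply_chat_template instead of hand-written [INST]
# tags; it assumes the default template bundled with the checkpoint.
def chatbot_via_chat_template(message, history=None):
    messages = [{"role": "user", "content": message}]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    output = chat_pipeline(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,  # return only the newly generated text
    )
    return output[0]["generated_text"].strip()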

gr.ChatInterface(
    fn=chatbot,
    title="🩺 OpenBioLLM Chatbot",
    description="Ask me anything biomedical!",
).launch()