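"""Gradio chat demo for OpenLLM-France/Claire-Mistral-7B-0.1.

Loads the model in 4-bit for efficient inference, formats each turn with the
Mistral [INST] ... [/INST] prompt template, and serves a simple chat UI that
keeps the running conversation in memory.
"""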
import textwrap

import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Helper to wrap long lines of generated text for readable display
def wrap_text(text, width=90):
    lines = text.split('\n')
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
    wrapped_text = '\n'.join(wrapped_lines)
    return wrapped_text
def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:", max_length=512):
    """Build a single-turn Mistral-style prompt, run generation, and return the decoded text."""
    # Combine the system prompt and user input in the [INST] ... [/INST] format
    formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"

    # Encode the input text
    encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
    model_inputs = encodeds.to(device)

    # Generate a response using the model
    output = model.generate(
        **model_inputs,
        max_length=max_length,
        use_cache=True,
        early_stopping=True,
        bos_token_id=model.config.bos_token_id,
        eos_token_id=model.config.eos_token_id,
        pad_token_id=model.config.eos_token_id,
        temperature=0.1,
        do_sample=True,
    )
    response_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return response_text
device = "cuda" if torch.cuda.is_available() else "cpu"

model_name = "OpenLLM-France/Claire-Mistral-7B-0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    load_in_4bit=True,  # For efficient inference, if supported by the GPU card
)
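# Note: load_in_4bit requires the bitsandbytes package and a supported GPU. As a
# fallback sketch (an assumption, not part of the original Space), the model can
# also be loaded in plain bfloat16:
# model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.bfloat16)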
class ChatBot:
    def __init__(self):
        self.history = None

    def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
        # Combine the system prompt and user input in the Mistral [INST] format
        formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"

        # Encode the user input and move it to the same device as the model
        user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt", add_special_tokens=False).to(device)

        # Concatenate the new turn with the chat history, if any
        if self.history is not None:
            chat_history_ids = torch.cat([self.history, user_input_ids], dim=-1)
        else:
            chat_history_ids = user_input_ids

        # Generate a response conditioned on the full conversation so far
        response = model.generate(
            input_ids=chat_history_ids,
            max_length=512,
            pad_token_id=tokenizer.eos_token_id,
        )

        # Keep the generated sequence so the next turn also sees the model's reply
        self.history = response

        # Decode only the newly generated tokens, not the echoed prompt and history
        response_text = tokenizer.decode(response[0][chat_history_ids.shape[-1]:], skip_special_tokens=True)
        return response_text
bot = ChatBot()
title = "👋🏻Welcome to Tonic's Claire Chat🚀"
description = "You can use this Space to test out the current model ([ClaireLLM](https://huggingface.co/OpenLLM-France/Claire-Mistral-7B-0.1)) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on [Discord to build together](https://discord.gg/nXx5wbX9)."
examples = [["Oueche Normal, Claire, ça va ou quoi?", "Bonjour, je m'appelle Claire et je suis une assistante francophone-first conçue par OpenLLM"]]
iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "text"],  # Take the user input and system prompt separately
    outputs="text",
    theme="ParityError/Anime",
)
iface.launch()
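# Optional usage sketch (not part of the original Space): bot.predict() can also be
# called directly, outside the Gradio UI, to sanity-check generation, e.g.
#   print(bot.predict("Bonjour Claire, comment ça va ?"))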