import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
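
# Hub id of the base model the QLoRA adapter was fine-tuned from.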
base_model_id = "mistralai/Mistral-7B-v0.1"
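
# Quantization settings: load the weights in 4-bit NF4 with bfloat16 compute,
# so the 7B model fits on a single GPU.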
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=False,
)
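
# Load the quantized base model, placing all layers on GPU 0.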
model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map={"": 0},
)
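
# Apply the fine-tuned LoRA adapter weights on top of the quantized base model.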
ft_model = PeftModel.from_pretrained(model, 'kiki7sun/mixtral-academic-finetune-QLoRA-0121')
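
# Tokenizer from the base model; add_bos_token prepends the BOS token to every prompt.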
tokenizer = AutoTokenizer.from_pretrained(
    base_model_id,
    add_bos_token=True,
    trust_remote_code=True,
)
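
# Inference only: disable dropout and other training-time behaviour.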
ft_model.eval()
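
# Generate a completion for the prompt and return the decoded text.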
def greet(eval_prompt, max_new_tokens):
    model_input = tokenizer(eval_prompt, return_tensors="pt").to("cuda")
    with torch.no_grad():
        generation = ft_model.generate(**model_input, max_new_tokens=int(max_new_tokens))
    return tokenizer.decode(generation[0], skip_special_tokens=True)
# Two input components to match greet's two parameters; the slider range and
# default are illustrative choices.
demo = gr.Interface(
    fn=greet,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens"),
    ],
    outputs="textbox",
)
demo.queue().launch(debug=True, share=True, inline=False)