import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig

# Load Code Llama and quantize it to 4-bit at load time with bitsandbytes.
# Note: GPTQ checkpoints (e.g. TheBloke/CodeLlama-7B-GPTQ) use a different
# loading path and are not compatible with BitsAndBytesConfig, so we start
# from the original fp16 weights instead.
model_id = "codellama/CodeLlama-7b-Instruct-hf"

# device_map is an argument to from_pretrained, not a BitsAndBytesConfig field
bnb_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def generate_response(prompt):
    # max_new_tokens bounds only the generated tokens; the original
    # max_length=512 would have counted the prompt against that budget too.
    output = pipe(prompt, max_new_tokens=512, do_sample=True, temperature=0.3)[0]["generated_text"]
    return output
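# Hypothetical addition, not part of the original app: the Instruct variants
# of Code Llama are trained on prompts wrapped in [INST] ... [/INST], so a
# small helper like this, applied to `prompt` inside generate_response,
# tends to yield direct answers instead of free-form continuations.
# Treat it as a sketch under that assumption.
def format_instruct_prompt(user_prompt: str) -> str:
    return f"[INST] {user_prompt.strip()} [/INST]"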
gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=5, label="Your prompt"),
    outputs=gr.Textbox(label="Code Llama response"),
    title="Code Llama Demo",
    description="Ask questions or request code snippets!",
).launch()
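For the Space to build, the dependencies implied by the imports above need to be declared. A minimal requirements.txt along these lines should work; the exact package set is an assumption read off the code (accelerate backs device_map="auto", bitsandbytes backs the 4-bit loading), and the versions are left unpinned deliberately:

gradio
transformers
torch
accelerate    # needed for device_map="auto"
bitsandbytes  # needed for load_in_4bit=True

One hardware caveat: bitsandbytes 4-bit loading expects a CUDA GPU, so this Space generally needs GPU hardware rather than the free CPU tier.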