|
import gradio as gr |
|
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline |
|
|
|
|
|
# Hugging Face Hub identifier of the instruction-tuned Mistral 7B checkpoint.
model_id = "mistralai/Mistral-7B-Instruct-v0.2"

# Load the matching tokenizer for the checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_id)

# device_map="auto" lets accelerate place layers on available devices;
# torch_dtype="auto" loads weights in the checkpoint's native precision.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

# Single text-generation pipeline, built once at startup and reused by
# every Gradio request below.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
|
|
def generate_json(prompt): |
|
system_prompt = "You are an AI that generates valid and clean JSON objects based on descriptions." |
|
input_prompt = f"{system_prompt}\n\nDescription:\n{prompt}\n\nJSON:\n" |
|
output = generator(input_prompt, max_new_tokens=256, do_sample=False)[0]["generated_text"] |
|
|
|
|
|
json_part = output.replace(input_prompt, "").strip() |
|
return json_part |
|
|
|
|
|
# Wire the generator into a simple web UI: one multiline text box in,
# the generated JSON text out.
demo = gr.Interface(
    fn=generate_json,
    inputs=gr.Textbox(label="Describe the JSON you want", lines=4),
    outputs=gr.Textbox(label="Generated JSON", lines=20),
    title="🧠 JSON Generator with LLM",
    description="Enter a plain language description and get a structured JSON output.",
)

demo.launch()
|
|