File size: 1,172 Bytes
65757ca
 
 
4def57d
dd3284f
aed0be3
4def57d
 
dd3284f
4def57d
 
65757ca
dd3284f
 
 
 
4def57d
e360b55
65757ca
4def57d
65757ca
dd3284f
65757ca
4def57d
233b266
65757ca
 
 
dd3284f
 
233b266
 
67656e6
1510ad8
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import json

import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    pipeline,
)

# 🧠 Code model used to turn natural-language prompts into JSON.
# NOTE: deepseek-coder is a decoder-only (causal) LM, so it must be loaded
# with AutoModelForCausalLM and driven through the "text-generation" task —
# AutoModelForSeq2SeqLM raises ValueError for this architecture.
model_name = "deepseek-ai/deepseek-coder-1.3b-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# return_full_text=False strips the echoed prompt from the pipeline output,
# so downstream json.loads sees only the model's continuation.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    return_full_text=False,
)

def generate_json(prompt: str) -> str:
    """Generate a JSON document from a natural-language *prompt*.

    Returns pretty-printed JSON when the model output parses cleanly;
    otherwise returns the raw model output together with the parse error
    so the user can inspect what was actually produced.
    """
    instruction = f"Generate JSON: {prompt}"
    # max_new_tokens bounds only the generated continuation, so a long
    # prompt cannot exhaust the budget the way max_length would.
    result = generator(instruction, max_new_tokens=256, do_sample=False)
    generated_text = result[0]["generated_text"]

    try:
        parsed = json.loads(generated_text)
    except json.JSONDecodeError as e:
        # Only a parse failure is expected here; anything else should surface.
        return f"Raw Output:\n{generated_text}\n\nError parsing JSON: {e}"

    return json.dumps(parsed, indent=2)

# Wire the generator into a minimal Gradio UI: one prompt box in,
# one (larger) JSON box out.
prompt_box = gr.Textbox(lines=4, label="Enter Prompt")
json_box = gr.Textbox(lines=20, label="Generated JSON")

demo = gr.Interface(
    fn=generate_json,
    inputs=prompt_box,
    outputs=json_box,
    title="Accurate JSON Generator",
    description="Uses a fine-tuned model to reliably generate JSON from natural language prompts.",
)

# Queueing serializes requests so concurrent users don't contend for the model;
# show_error surfaces Python exceptions in the browser instead of a generic 500.
demo.queue()
demo.launch(show_error=True)