import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import json
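
# Load the tokenizer and model once at startup and wrap them in a
# text2text-generation pipeline; flan-t5-small is small enough to run on CPU.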
model_name = "google/flan-t5-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
def generate_json(prompt):
    instruction = f"Generate a JSON object with the following properties: {prompt}"
    result = generator(instruction, max_length=256, do_sample=False)
    generated_text = result[0]["generated_text"]
    # Debug: print the raw model output so it can be inspected in the logs
    print(f"Raw Model Output: {generated_text}")
    try:
        # Try parsing the generated text as JSON and pretty-print it
        parsed = json.loads(generated_text)
        formatted_json = json.dumps(parsed, indent=2)
    except Exception as e:
        # If parsing fails, return the raw output together with the parse error
        formatted_json = f"Raw Output:\n{generated_text}\n\nError parsing JSON: {e}"
    return formatted_json
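
# Rough usage sketch (assumption: calling the handler directly, outside the
# Gradio UI, e.g. from a Python shell after importing this file):
#   print(generate_json('a "book" object with title, author, and year fields'))
# flan-t5-small is not tuned for structured output, so json.loads() above may
# fail; in that case the except branch returns the raw text plus the parse
# error instead of pretty-printed JSON.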
demo = gr.Interface(
    fn=generate_json,
    inputs=gr.Textbox(lines=4, label="Enter Prompt"),
    outputs=gr.Textbox(lines=20, label="Generated JSON"),
    title="Lightweight JSON Generator",
    description="Enter a prompt describing the structure or content you want in JSON format.",
)
demo.queue()
demo.launch(show_error=True)