import json

from flask import Flask, jsonify, request
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

app = Flask(__name__)

# Load a lightweight model for structured output.
model_name = "google/flan-t5-small"  # You can replace this with another small instruct model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)


@app.route('/generate-json', methods=['POST'])
def generate_json():
    """Turn a natural-language prompt into a JSON object via the model.

    Expects a JSON request body of the form ``{"prompt": "..."}``.

    Returns:
        200 with ``{"json": <parsed object>}`` when the model output is
        valid JSON; 200 with ``{"raw_output": ..., "error": ...}`` when it
        is not; 400 with an error message when the prompt is missing.
    """
    # silent=True yields None (instead of raising) on a non-JSON body,
    # so a malformed request gets the intended 400 rather than a 500.
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt")
    if not prompt:
        return jsonify({"error": "Prompt is required"}), 400

    # Add instruction if needed
    instruction = f"Generate a JSON object from the following description:\n{prompt}"
    response = generator(instruction, max_length=256, do_sample=False)
    generated_text = response[0]["generated_text"]

    try:
        # SECURITY: parse with json.loads — never eval() model-generated
        # text, which would execute arbitrary Python code.
        generated_json = json.loads(generated_text)
        return jsonify({"json": generated_json})
    except json.JSONDecodeError as e:
        # Model produced non-JSON text; return it raw so the caller can see why.
        return jsonify({"raw_output": generated_text, "error": str(e)}), 200


if __name__ == '__main__':
    app.run(debug=True)