File size: 1,282 Bytes
4def57d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import json

from flask import Flask, request, jsonify
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer

app = Flask(__name__)

# Load a lightweight model for structured output.
# NOTE: this runs at import time — the first request will not be served
# until the model weights are downloaded/loaded into memory.
model_name = "google/flan-t5-small"  # You can replace this with another small instruct model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Seq2seq text-to-text pipeline used by the /generate-json route below.
generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

@app.route('/generate-json', methods=['POST'])
def generate_json():
    """Generate a JSON object from a natural-language description.

    Expects a JSON request body of the form ``{"prompt": "<description>"}``.

    Returns:
        200 with ``{"json": <parsed object>}`` when the model output parses
        as JSON, or 200 with ``{"raw_output": ..., "error": ...}`` when it
        does not; 400 with ``{"error": ...}`` when the prompt is missing.
    """
    # silent=True yields None (rather than raising a 400/415 error page)
    # for a missing or malformed JSON body, so we can emit our own message.
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt")

    if not prompt:
        return jsonify({"error": "Prompt is required"}), 400

    # Steer the model toward emitting a JSON object.
    instruction = f"Generate a JSON object from the following description:\n{prompt}"

    response = generator(instruction, max_length=256, do_sample=False)
    generated_text = response[0]["generated_text"]

    try:
        # SECURITY FIX: the original called eval() on model output, which
        # executes arbitrary Python code. json.loads only parses data and
        # raises JSONDecodeError on anything that is not valid JSON.
        generated_json = json.loads(generated_text)
        return jsonify({"json": generated_json})
    except json.JSONDecodeError as e:
        # Model produced non-JSON text; return it raw so the caller can debug.
        return jsonify({"raw_output": generated_text, "error": str(e)}), 200

if __name__ == '__main__':
    # Development server only — debug=True enables the interactive debugger
    # and auto-reload; do not use in production (serve via WSGI instead).
    app.run(debug=True)