kevalfst committed on
Commit
65757ca
verified
1 Parent(s): 82611e3

Update app.py

Files changed (1)
  1. app.py +20 -24
app.py CHANGED
@@ -1,35 +1,31 @@
-from flask import Flask, request, jsonify
-from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
+import gradio as gr
+from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
+import json

-app = Flask(__name__)
-
-# Load a lightweight model for structured output
-model_name = "google/flan-t5-small"  # You can replace this with another small instruct model
+# Load lightweight model
+model_name = "google/flan-t5-small"  # You can change this to another small instruct model
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

 generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

-@app.route('/generate-json', methods=['POST'])
-def generate_json():
-    data = request.json
-    prompt = data.get("prompt")
-
-    if not prompt:
-        return jsonify({"error": "Prompt is required"}), 400
-
-    # Add instruction if needed
+def generate_json(prompt):
     instruction = f"Generate a JSON object from the following description:\n{prompt}"
-
-    response = generator(instruction, max_length=256, do_sample=False)
-    generated_text = response[0]["generated_text"]
+    result = generator(instruction, max_length=256, do_sample=False)
+    generated_text = result[0]["generated_text"]

     try:
-        # Try to parse generated text as JSON
-        generated_json = eval(generated_text)  # Use json.loads if safe JSON string is expected
-        return jsonify({"json": generated_json})
+        parsed = eval(generated_text)  # Not recommended in production; use json.loads if output is valid JSON
+        formatted_json = json.dumps(parsed, indent=2)
     except Exception as e:
-        return jsonify({"raw_output": generated_text, "error": str(e)}), 200
+        formatted_json = f"Raw Output:\n{generated_text}\n\nError parsing JSON: {e}"
+
+    return formatted_json

-if __name__ == '__main__':
-    app.run(debug=True)
+gr.Interface(
+    fn=generate_json,
+    inputs=gr.Textbox(lines=4, label="Enter Prompt"),
+    outputs=gr.Textbox(lines=20, label="Generated JSON"),
+    title="Lightweight JSON Generator",
+    description="Enter a prompt describing the structure or content you want in JSON format."
+).launch()
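
The committed handler still parses the model output with eval(), which its own comment flags as unsafe. A minimal sketch of the safer variant that comment hints at, using json.loads with the same fallback behaviour (the helper name parse_model_output is illustrative and not part of this commit):

import json

def parse_model_output(generated_text: str) -> str:
    # Illustrative helper, not part of the commit: pretty-print valid JSON,
    # otherwise fall back to the raw text plus the parse error.
    try:
        parsed = json.loads(generated_text)   # safe parsing instead of eval()
        return json.dumps(parsed, indent=2)   # formatted for the Gradio textbox
    except json.JSONDecodeError as e:
        return f"Raw Output:\n{generated_text}\n\nError parsing JSON: {e}"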