Spaces:
Sleeping
Sleeping
Upload 11 files
Browse files- .gitattributes +1 -0
- AutoregressionSchema.gif +3 -0
- app.py +0 -0
- app2.py +193 -0
- app2_BLIP.py +131 -0
- app_BLIP_processing.py +41 -0
- app_main copy 2.py +1248 -0
- app_main copy.py +1400 -0
- app_main.py +623 -500
- assets_manipulate.py +122 -0
- openclip_embeddings.ipynb +0 -0
- test_app.ipynb +0 -0
.gitattributes
CHANGED
@@ -37,3 +37,4 @@ reference/Backdrops/beach_malibu.sb3/050615fe992a00d6af0e664e497ebf53.png filter
|
|
37 |
reference/Backdrops/castle2.sb3/951765ee7f7370f120c9df20b577c22f.png filter=lfs diff=lfs merge=lfs -text
|
38 |
reference/Backdrops/hall.sb3/ea86ca30b346f27ca5faf1254f6a31e3.png filter=lfs diff=lfs merge=lfs -text
|
39 |
reference/Backdrops/jungle.sb3/f4f908da19e2753f3ed679d7b37650ca.png filter=lfs diff=lfs merge=lfs -text
|
|
|
|
37 |
reference/Backdrops/castle2.sb3/951765ee7f7370f120c9df20b577c22f.png filter=lfs diff=lfs merge=lfs -text
|
38 |
reference/Backdrops/hall.sb3/ea86ca30b346f27ca5faf1254f6a31e3.png filter=lfs diff=lfs merge=lfs -text
|
39 |
reference/Backdrops/jungle.sb3/f4f908da19e2753f3ed679d7b37650ca.png filter=lfs diff=lfs merge=lfs -text
|
40 |
+
AutoregressionSchema.gif filter=lfs diff=lfs merge=lfs -text
|
AutoregressionSchema.gif
ADDED
![]() |
Git LFS Details
|
app.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
app2.py
ADDED
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import io
|
3 |
+
import base64
|
4 |
+
import os, re
|
5 |
+
from langchain_google_vertexai.vision_models import VertexAIVisualQnAChat
|
6 |
+
from PIL import Image
|
7 |
+
from langchain_core.messages import HumanMessage, SystemMessage
|
8 |
+
from langchain_groq import ChatGroq
|
9 |
+
from dotenv import load_dotenv
|
10 |
+
from groq import Groq
|
11 |
+
from flask import Flask, jsonify
|
12 |
+
from langgraph.prebuilt import create_react_agent
|
13 |
+
|
14 |
+
load_dotenv()
|
15 |
+
# os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")
|
16 |
+
groq_api_key = os.getenv("GROQ_API_KEY")
|
17 |
+
|
18 |
+
app = Flask(__name__)
|
19 |
+
|
20 |
+
'''#initialize groq client
|
21 |
+
client = Groq(api_key=groq_api_key)
|
22 |
+
print(f"client:{client}") '''
|
23 |
+
|
24 |
+
static_image_path = os.path.join("images", "page2_print.jfif")
|
25 |
+
|
26 |
+
llm = ChatGroq(
|
27 |
+
model="meta-llama/llama-4-maverick-17b-128e-instruct",
|
28 |
+
temperature=0,
|
29 |
+
max_tokens=None,
|
30 |
+
)
|
31 |
+
|
32 |
+
@app.route("/", methods=["GET"])
|
33 |
+
def analyze_static_image():
|
34 |
+
if not os.path.exists(static_image_path):
|
35 |
+
return jsonify({"error": f"Image not found"})
|
36 |
+
|
37 |
+
# Load image and convert to base64 string
|
38 |
+
image_path = r"images\page2_print.jfif"
|
39 |
+
with open(image_path, "rb") as image_file:
|
40 |
+
image_bytes = image_file.read()
|
41 |
+
img_base64 = base64.b64encode(image_bytes).decode("utf-8")
|
42 |
+
|
43 |
+
# # Construct image content block
|
44 |
+
# image_content_block = {
|
45 |
+
# "type": "image_url",
|
46 |
+
# "image_url": {
|
47 |
+
# # "url": f"data:image/jpeg;base64,{image_data_url}"
|
48 |
+
# "url": f"data:image/jpeg;base64,{img_base64}"
|
49 |
+
# }
|
50 |
+
# }
|
51 |
+
|
52 |
+
# SET A SYSTEM PROMPT
|
53 |
+
system_prompt = """
|
54 |
+
You are an expert in visual scene understanding.
|
55 |
+
Your Job is to analyze an image and respond with structured json like This :
|
56 |
+
- Any number of "Sprites": These refer to distinct characters, animals, or objects in the image that are **in front of the background** (e.g., cat, ball, crab, person, etc.).
|
57 |
+
{
|
58 |
+
"Sprite 1": {
|
59 |
+
"name": "Cat",
|
60 |
+
"description":"An orange cartoon cat with a cheerful expression, shown jumping playfully."
|
61 |
+
},
|
62 |
+
"Backdrop":{
|
63 |
+
"name":"Beach Scene",
|
64 |
+
"description":"A serene beach with sand, blue water, and a clear sky."
|
65 |
+
}
|
66 |
+
}
|
67 |
+
Guidelines:
|
68 |
+
- Focus only the images given in Square Shape.
|
69 |
+
- Don't Consider Blank areas in Image as "Backdrop".
|
70 |
+
- Do NOT classify the background scene as a sprite.
|
71 |
+
- All characters or objects placed in the foreground should be "Sprites".
|
72 |
+
- Use 'Sprite 1', 'Sprite 2', etc. for character or figures.
|
73 |
+
- Use 'Backdrop' for environmental setting or Background behind Sprite.
|
74 |
+
- Don't include generic summary or explanation outside the fields.
|
75 |
+
Return only valid JSON.
|
76 |
+
"""
|
77 |
+
|
78 |
+
# Compose message using LangChain's HumanMessage
|
79 |
+
content = [
|
80 |
+
{
|
81 |
+
"type": "text",
|
82 |
+
"text": "Analyze the image and describe the backdrops and characters as per instruction."
|
83 |
+
},
|
84 |
+
{
|
85 |
+
"type": "image_url",
|
86 |
+
"image_url": {
|
87 |
+
"url": f"data:image/jpeg;base64,{img_base64}"
|
88 |
+
}
|
89 |
+
}
|
90 |
+
]
|
91 |
+
|
92 |
+
agent = create_react_agent(
|
93 |
+
model = llm,
|
94 |
+
tools = [],
|
95 |
+
prompt = system_prompt
|
96 |
+
)
|
97 |
+
|
98 |
+
# agent_executor = AgentExecutor(agent=agent, tools=[])
|
99 |
+
# Pass the human prompt + system message
|
100 |
+
# messages = [system_prompt, *human_prompt]
|
101 |
+
# messages = [system_prompt, *human_prompt]
|
102 |
+
|
103 |
+
# call the LLM
|
104 |
+
try:
|
105 |
+
# response = llm.invoke(messages)
|
106 |
+
# response = agent.invoke({"input":human_prompt})
|
107 |
+
response = agent.invoke({"messages": [{"role": "user", "content":content}]})
|
108 |
+
print(response)
|
109 |
+
|
110 |
+
raw_response = response["messages"][-1].content
|
111 |
+
|
112 |
+
cleaned_json_str = re.sub(r"^```json\s*|\s*```$", "", raw_response.strip(), flags=re.DOTALL)
|
113 |
+
try:
|
114 |
+
detected_info = json.loads(cleaned_json_str)
|
115 |
+
except json.JSONDecodeError as e:
|
116 |
+
# If parsing fails, fallback to raw string or handle error
|
117 |
+
print("JSON parsing error:", e)
|
118 |
+
detected_info = cleaned_json_str # or handle as needed
|
119 |
+
# Extract the answer text from the response
|
120 |
+
# detected_info = response.content
|
121 |
+
# detected_info = raw_response
|
122 |
+
except Exception as e:
|
123 |
+
return jsonify({"error": str(e)}), 500
|
124 |
+
|
125 |
+
# Save the detected information to a JSON file
|
126 |
+
result = {
|
127 |
+
"image_path": image_path,
|
128 |
+
"detected_info": detected_info,
|
129 |
+
}
|
130 |
+
|
131 |
+
# Save JSON result
|
132 |
+
with open("detected_image_info.json", "w") as f:
|
133 |
+
json.dump(result, f, indent=4)
|
134 |
+
print("Detection results saved to detected_image_info.json")
|
135 |
+
return jsonify(result)
|
136 |
+
|
137 |
+
if __name__ == "__main__":
|
138 |
+
app.run(debug=True)
|
139 |
+
|
140 |
+
|
141 |
+
'''#build the chat messages
|
142 |
+
messages = [
|
143 |
+
{
|
144 |
+
"role":"system",
|
145 |
+
"content":"you are an expert image analyzer. Describe backdrops and sprite/character in the image."
|
146 |
+
},
|
147 |
+
{
|
148 |
+
"role":"user",
|
149 |
+
"content":[
|
150 |
+
{
|
151 |
+
"type":"text",
|
152 |
+
"text":"Describe image in detail. What backdrops and characters are present ?"
|
153 |
+
},
|
154 |
+
image_content_block
|
155 |
+
]
|
156 |
+
}
|
157 |
+
]'''
|
158 |
+
|
159 |
+
'''# create completion with Groq
|
160 |
+
response = client.chat.completions.create(
|
161 |
+
model = "meta-llama/llama-4-maverick-17b-128e-instruct",
|
162 |
+
messages=messages,
|
163 |
+
temperature=0,
|
164 |
+
max_tokens=1024,
|
165 |
+
top_p=1,
|
166 |
+
stream=False
|
167 |
+
)
|
168 |
+
print(f"\n\n========RESPONSE CHOICES : {response}\n\n")
|
169 |
+
# extract the result
|
170 |
+
detected_info = response.choices[0].message.content
|
171 |
+
print(f"DETECTED_INFO : {detected_info}")
|
172 |
+
|
173 |
+
|
174 |
+
# save output to json
|
175 |
+
output_data = {
|
176 |
+
"image_path":image_path,
|
177 |
+
"detected_info":detected_info
|
178 |
+
}
|
179 |
+
print(f"output_data : {output_data}")
|
180 |
+
|
181 |
+
with open("detected_image_info.json", "w") as f:
|
182 |
+
json.dump(output_data, f, indent=4)
|
183 |
+
|
184 |
+
print("✅ Detection results saved to detected_image_info.json")'''
|
185 |
+
|
186 |
+
|
187 |
+
# # Define the question to detect objects and characters in the image
|
188 |
+
# question = "What objects and characters are present in this image?"
|
189 |
+
|
190 |
+
# messages = [HumanMessage(content=[image_content_block, question])]
|
191 |
+
# print(messages)
|
192 |
+
# Invoke the model with the image and question
|
193 |
+
# response = llm.invoke({"image": image_content_block, "question": question})
|
app2_BLIP.py
ADDED
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import io
|
3 |
+
import base64
|
4 |
+
import os, re
|
5 |
+
from langchain_google_vertexai.vision_models import VertexAIVisualQnAChat
|
6 |
+
from PIL import Image
|
7 |
+
from langchain_core.messages import HumanMessage, SystemMessage
|
8 |
+
from langchain_groq import ChatGroq
|
9 |
+
from dotenv import load_dotenv
|
10 |
+
from groq import Groq
|
11 |
+
from flask import Flask, jsonify
|
12 |
+
from langgraph.prebuilt import create_react_agent
|
13 |
+
|
14 |
+
from langchain_community.llms import huggingface_pipeline
|
15 |
+
from transformers import BlipProcessor, BlipForConditionalGeneration
|
16 |
+
import torch
|
17 |
+
from langchain_core.prompts import PromptTemplate
|
18 |
+
|
19 |
+
load_dotenv()
|
20 |
+
# os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")
|
21 |
+
# groq_api_key = os.getenv("GROQ_API_KEY")
|
22 |
+
|
23 |
+
app = Flask(__name__)
|
24 |
+
|
25 |
+
static_image_path = os.path.join("images", "page2_print.jfif")
|
26 |
+
|
27 |
+
# llm = ChatGroq(
|
28 |
+
# model="meta-llama/llama-4-maverick-17b-128e-instruct",
|
29 |
+
# temperature=0,
|
30 |
+
# max_tokens=None,
|
31 |
+
# )
|
32 |
+
|
33 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
|
34 |
+
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to("cpu")
|
35 |
+
|
36 |
+
def analyze_with_blip(image_pil):
|
37 |
+
inputs = processor(image_pil, return_tensors="pt").to("cpu")
|
38 |
+
out = model.generate(**inputs, max_new_tokens=100)
|
39 |
+
caption = processor.decode(out[0], skip_special_tokens=True)
|
40 |
+
return caption
|
41 |
+
|
42 |
+
@app.route("/", methods=["GET"])
|
43 |
+
def analyze_static_image():
|
44 |
+
if not os.path.exists(static_image_path):
|
45 |
+
return jsonify({"error": f"Image not found"})
|
46 |
+
|
47 |
+
# Load image and convert to base64 string
|
48 |
+
image_path = r"images\page2_print.jfif"
|
49 |
+
with open(image_path, "rb") as image_file:
|
50 |
+
image_bytes = image_file.read()
|
51 |
+
img_base64 = base64.b64encode(image_bytes).decode("utf-8")
|
52 |
+
|
53 |
+
# SET A SYSTEM PROMPT
|
54 |
+
system_prompt = """
|
55 |
+
You are an expert in visual scene understanding.
|
56 |
+
Your Job is to analyze an image and respond with structured json like This :
|
57 |
+
- Any number of "Sprites": These refer to distinct characters, animals, or objects in the image that are **in front of the background** (e.g., cat, ball, crab, person, etc.).
|
58 |
+
{
|
59 |
+
"Sprite 1": {
|
60 |
+
"name": "Cat",
|
61 |
+
"description":"An orange cartoon cat with a cheerful expression, shown jumping playfully."
|
62 |
+
},
|
63 |
+
"Backdrop":{
|
64 |
+
"name":"Beach Scene",
|
65 |
+
"description":"A serene beach with sand, blue water, and a clear sky."
|
66 |
+
}
|
67 |
+
}
|
68 |
+
Guidelines:
|
69 |
+
- Focus only the images given in Square Shape.
|
70 |
+
- Don't Consider Blank areas in Image as "Backdrop".
|
71 |
+
- Do NOT classify the background scene as a sprite.
|
72 |
+
- All characters or objects placed in the foreground should be "Sprites".
|
73 |
+
- Use 'Sprite 1', 'Sprite 2', etc. for character or figures.
|
74 |
+
- Use 'Backdrop' for environmental setting or Background behind Sprite.
|
75 |
+
- Don't include generic summary or explanation outside the fields.
|
76 |
+
Return only valid JSON.
|
77 |
+
"""
|
78 |
+
|
79 |
+
# Compose message using LangChain's HumanMessage
|
80 |
+
content = [
|
81 |
+
{
|
82 |
+
"type": "text",
|
83 |
+
"text": "Analyze the image and describe the backdrops and characters as per instruction."
|
84 |
+
},
|
85 |
+
{
|
86 |
+
"type": "image_url",
|
87 |
+
"image_url": {
|
88 |
+
"url": f"data:image/jpeg;base64,{img_base64}"
|
89 |
+
}
|
90 |
+
}
|
91 |
+
]
|
92 |
+
|
93 |
+
agent = create_react_agent(
|
94 |
+
model = llm,
|
95 |
+
tools = [],
|
96 |
+
prompt = system_prompt
|
97 |
+
)
|
98 |
+
|
99 |
+
# call the LLM
|
100 |
+
try:
|
101 |
+
# response = llm.invoke(messages)
|
102 |
+
# response = agent.invoke({"input":human_prompt})
|
103 |
+
response = agent.invoke({"messages": [{"role": "user", "content":content}]})
|
104 |
+
print(response)
|
105 |
+
|
106 |
+
raw_response = response["messages"][-1].content
|
107 |
+
|
108 |
+
cleaned_json_str = re.sub(r"^```json\s*|\s*```$", "", raw_response.strip(), flags=re.DOTALL)
|
109 |
+
try:
|
110 |
+
detected_info = json.loads(cleaned_json_str)
|
111 |
+
except json.JSONDecodeError as e:
|
112 |
+
# If parsing fails, fallback to raw string or handle error
|
113 |
+
print("JSON parsing error:", e)
|
114 |
+
detected_info = cleaned_json_str # or handle as needed
|
115 |
+
except Exception as e:
|
116 |
+
return jsonify({"error": str(e)}), 500
|
117 |
+
|
118 |
+
# Save the detected information to a JSON file
|
119 |
+
result = {
|
120 |
+
"image_path": image_path,
|
121 |
+
"detected_info": detected_info,
|
122 |
+
}
|
123 |
+
|
124 |
+
# Save JSON result
|
125 |
+
with open("detected_image_info.json", "w") as f:
|
126 |
+
json.dump(result, f, indent=4)
|
127 |
+
print("Detection results saved to detected_image_info.json")
|
128 |
+
return jsonify(result)
|
129 |
+
|
130 |
+
if __name__ == "__main__":
|
131 |
+
app.run(debug=True)
|
app_BLIP_processing.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import BlipProcessor, BlipForConditionalGeneration
|
2 |
+
from langchain.chains import LLMChain
|
3 |
+
from langchain.schema import BaseOutputParser
|
4 |
+
from PIL import Image
|
5 |
+
import torch
|
6 |
+
|
7 |
+
# Define a simple Output Parser
|
8 |
+
class CaptionParser(BaseOutputParser):
|
9 |
+
def parse(self, text: str):
|
10 |
+
return text.strip()
|
11 |
+
|
12 |
+
# LangChain-compatible VLM wrapper
|
13 |
+
class BLIPImageCaptioning:
|
14 |
+
def __init__(self):
|
15 |
+
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
16 |
+
self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base", use_auth_token=None)
|
17 |
+
self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", use_auth_token=None).to(self.device)
|
18 |
+
|
19 |
+
def predict(self, image_path: str) -> str:
|
20 |
+
raw_image = Image.open(image_path).convert('RGB')
|
21 |
+
inputs = self.processor(raw_image, return_tensors="pt").to(self.device)
|
22 |
+
out = self.model.generate(**inputs)
|
23 |
+
caption = self.processor.decode(out[0], skip_special_tokens=True)
|
24 |
+
return caption
|
25 |
+
|
26 |
+
# Use the BLIP model via LangChain
|
27 |
+
class ImageCaptionChain:
|
28 |
+
def __init__(self):
|
29 |
+
self.captioner = BLIPImageCaptioning()
|
30 |
+
self.output_parser = CaptionParser()
|
31 |
+
|
32 |
+
def run(self, image_path: str):
|
33 |
+
caption = self.captioner.predict(image_path)
|
34 |
+
return self.output_parser.parse(caption)
|
35 |
+
|
36 |
+
# ----------- Run Example -------------
|
37 |
+
if __name__ == "__main__":
|
38 |
+
image_path = r"images\sample.jpg" # Replace with your image path
|
39 |
+
chain = ImageCaptionChain()
|
40 |
+
caption = chain.run(image_path)
|
41 |
+
print("Generated Caption:", caption)
|
app_main copy 2.py
ADDED
@@ -0,0 +1,1248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from flask import Flask, render_template, request, jsonify
|
2 |
+
import json,base64,io,os,logging,re
|
3 |
+
import numpy as np
|
4 |
+
from unstructured.partition.pdf import partition_pdf
|
5 |
+
from PIL import Image
|
6 |
+
# from imutils.perspective import four_point_transform
|
7 |
+
from dotenv import load_dotenv
|
8 |
+
import pytesseract
|
9 |
+
# from transformers import AutoProcessor, AutoModelForImageTextToText, AutoModelForVision2Seq
|
10 |
+
from langchain_community.document_loaders.image_captions import ImageCaptionLoader
|
11 |
+
from werkzeug.utils import secure_filename
|
12 |
+
from langchain_groq import ChatGroq
|
13 |
+
from langgraph.prebuilt import create_react_agent
|
14 |
+
from pdf2image import convert_from_bytes #convert_from_path,
|
15 |
+
import asyncio
|
16 |
+
from concurrent.futures import ThreadPoolExecutor
|
17 |
+
from pdf2image.exceptions import PDFInfoNotInstalledError
|
18 |
+
from typing import Dict, TypedDict, Optional, Any
|
19 |
+
from langgraph.graph import StateGraph, END
|
20 |
+
import uuid
|
21 |
+
import shutil, time, functools
|
22 |
+
from langchain_experimental.open_clip.open_clip import OpenCLIPEmbeddings
|
23 |
+
# from matplotlib.offsetbox import OffsetImage, AnnotationBbox
|
24 |
+
from io import BytesIO
|
25 |
+
|
26 |
+
# https://prthm11-Scratch_vlm_v1.hf.space/
|
27 |
+
# https://huggingface.co/spaces/prthm11/Scratch_vlm_v1
|
28 |
+
# https://prthm11-scratch-vlm-v1.hf.space/process_pdf
|
29 |
+
|
30 |
+
# def log_execution_time(func):
|
31 |
+
# @functools.wraps(func)
|
32 |
+
# def wrapper(*args, **kwargs):
|
33 |
+
# start_time = time.time()
|
34 |
+
# result = func(*args, **kwargs)
|
35 |
+
# end_time = time.time()
|
36 |
+
# logger.info(f"⏱ {func.__name__} executed in {end_time - start_time:.2f} seconds")
|
37 |
+
# return result
|
38 |
+
# return wrapper
|
39 |
+
|
40 |
+
# ============================== #
#    INITIALIZE CLIP EMBEDDER    #
# ============================== #
clip_embd = OpenCLIPEmbeddings()

# Configure logging: write to app.log and echo to the console.
# DEBUG is verbose; switch to INFO or ERROR in production.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.FileHandler("app.log"),
        logging.StreamHandler(),
    ],
)

logger = logging.getLogger(__name__)

# Load environment variables (GROQ_API_KEY) from a local .env file.
load_dotenv()
groq_api_key = os.getenv("GROQ_API_KEY")

# LLM used by the agents further down; temperature 0 for deterministic output.
llm = ChatGroq(
    model="meta-llama/llama-4-maverick-17b-128e-instruct",
    temperature=0,
    max_tokens=None,
)

app = Flask(__name__)

# Windows-only tool locations. Apply them only when the binaries actually
# exist so the app still runs on Linux hosts (e.g. Hugging Face Spaces),
# where tesseract/poppler are expected to be on PATH. pytesseract falls back
# to the "tesseract" command and pdf2image treats poppler_path=None as
# "use PATH", so leaving these unset on Linux is the correct default.
_TESSERACT_EXE = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
if os.path.exists(_TESSERACT_EXE):
    pytesseract.pytesseract.tesseract_cmd = _TESSERACT_EXE

_POPPLER_BIN = r"C:\poppler-23.11.0\Library\bin"
poppler_path = _POPPLER_BIN if os.path.exists(_POPPLER_BIN) else None

count = 0  # NOTE(review): appears unused in this chunk — confirm before removing

# Output directory layout for intermediate artifacts.
OUTPUT_FOLDER = "OUTPUTS"
DETECTED_IMAGE_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "DETECTED_IMAGE")
IMAGE_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "SCANNED_IMAGE")
JSON_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "EXTRACTED_JSON")

for path in [OUTPUT_FOLDER, IMAGE_FOLDER_PATH, DETECTED_IMAGE_FOLDER_PATH, JSON_FOLDER_PATH]:
    os.makedirs(path, exist_ok=True)
|
82 |
+
|
83 |
+
# class GameState(TypedDict):
|
84 |
+
# image: str
|
85 |
+
# pseudo_node: Optional[Dict]
|
86 |
+
|
87 |
+
# # Refined SYSTEM_PROMPT with more explicit Scratch JSON rules, especially for variables
|
88 |
+
# SYSTEM_PROMPT = """
|
89 |
+
# You are an expert AI assistant named GameScratchAgent, specialized in generating and modifying Scratch-VM 3.x game project JSON.
|
90 |
+
# Your core task is to process game descriptions and existing Scratch JSON structures, then produce or update JSON segments accurately.
|
91 |
+
# You possess deep knowledge of Scratch 3.0 project schema, informed by comprehensive reference materials. When generating or modifying the `blocks` section, pay extremely close attention to the following:
|
92 |
+
|
93 |
+
# **Scratch Project JSON Schema Rules:**
|
94 |
+
|
95 |
+
# 1. **Target Structure (`project.json`'s `targets` array):**
|
96 |
+
# * Each object in the `targets` array represents a Stage or a Sprite.
|
97 |
+
# * `isStage`: A boolean indicating if the target is the Stage (`true`) or a Sprite (`false`).
|
98 |
+
# * `name`: The name of the Stage (e.g., `"Stage"`) or the Sprite (e.g., `"Cat"`). This property replaces `objName` found in older Scratch versions.
|
99 |
+
# * `variables` dictionary: This dictionary maps unique variable IDs to arrays `[variable_name, initial_value, isCloudVariable?]`.
|
100 |
+
# * `variable_name`: The user-defined name of the variable.
|
101 |
+
# * `initial_value`: The variable's initial value, which can be a number or a string.
|
102 |
+
# * `isCloudVariable?`: (Optional) A boolean indicating if it's a cloud variable (`true`) or a local variable (`false` or absent for regular variables).
|
103 |
+
# * Example: `"myVarId123": ["score", 0]`, `"cloudVarId456": ["☁ High Score", "54", true]`
|
104 |
+
# * `lists` dictionary: This dictionary maps unique list IDs to arrays `[list_name, [item1, item2, ...]]`.
|
105 |
+
# * Example: `"myListId789": ["my list", ["apple", "banana"]]`
|
106 |
+
# * `broadcasts` dictionary: This dictionary maps unique broadcast IDs to their names.
|
107 |
+
# * Example: `"myBroadcastId": "Game Over"`
|
108 |
+
# * `blocks` dictionary: This dictionary contains all the blocks belonging to this target. Keys are block IDs, values are block objects.
|
109 |
+
|
110 |
+
# 2. **Block Structure (within a `target`'s `blocks` dictionary):**
|
111 |
+
# * Every block object must have the following core properties:
|
112 |
+
# * [cite_start]`opcode`: A unique internal identifier for the block's specific functionality (e.g., `"motion_movesteps"`, `"event_whenflagclicked"`)[cite: 31, 18, 439, 452].
|
113 |
+
# * `parent`: The ID of the block directly above it in the script stack (or `null` for a top-level block).
|
114 |
+
# * `next`: The ID of the block directly below it in the script stack (or `null` for the end of a stack).
|
115 |
+
# * `inputs`: An object defining values or blocks plugged into the block's input slots. Values are **arrays**.
|
116 |
+
# * `fields`: An object defining dropdown menu selections or direct internal values within the block. Values are **arrays**.
|
117 |
+
# * `shadow`: `true` if it's a shadow block (e.g., a default number input that can be replaced by another block), `false` otherwise.
|
118 |
+
# * `topLevel`: `true` if it's a hat block or a standalone block (not connected to a parent), `false` otherwise.
|
119 |
+
|
120 |
+
# 3. **`inputs` Property Details (for blocks plugged into input slots):**
|
121 |
+
# * **Direct Block Connection (Reporter/Boolean block plugged in):**
|
122 |
+
# * Format: `"<INPUT_NAME>": [1, "<blockId_of_plugged_block>"]`
|
123 |
+
# * Example: `"CONDITION": [1, "someBooleanBlockId"]` (e.g., for an `if` block).
|
124 |
+
# * **Literal Value Input (Shadow block with a literal):**
|
125 |
+
# * Format: `"<INPUT_NAME>": [1, [<type_code>, "<value_string>"]]`
|
126 |
+
# * `type_code`: A numeric code representing the data type. Common codes include: `4` for number, `7` for string/text, `10` for string/message.
|
127 |
+
# * `value_string`: The literal value as a string.
|
128 |
+
# * Examples:
|
129 |
+
# * Number: `"STEPS": [1, [4, "10"]]` (for `move 10 steps` block).
|
130 |
+
# * String/Text: `"MESSAGE": [1, [7, "Hello"]]` (for `say Hello` block).
|
131 |
+
# * String/Message (common for text inputs): `"MESSAGE": [1, [10, "Hello!"]]` (for `say Hello! for 2 secs`).
|
132 |
+
# * **C-Block Substack (blocks within a loop or conditional):**
|
133 |
+
# * Format: `"<SUBSTACK_NAME>": [2, "<blockId_of_first_block_in_substack>"]`
|
134 |
+
# * Common `SUBSTACK_NAME` values are `SUBSTACK` (for `if`, `forever`, `repeat`) and `SUBSTACK2` (for `else` in `if else`).
|
135 |
+
# * Example: `"SUBSTACK": [2, "firstBlockInLoopId"]`
|
136 |
+
|
137 |
+
# 4. **`fields` Property Details (for dropdowns or direct internal values):**
|
138 |
+
# * Used for dropdown menus, variable names, list names, or other static selections directly within the block.
|
139 |
+
# * Format: `"<FIELD_NAME>": ["<selected_value>", null]`
|
140 |
+
# * Examples:
|
141 |
+
# * Dropdown: `"KEY_OPTION": ["space", null]` (for `when space key pressed`).
|
142 |
+
# * Variable Name: `"VARIABLE": ["score", null]` (for `set score to 0`).
|
143 |
+
# * Direction (specific motion block): `"FORWARD_BACKWARD": ["forward", null]` (for `go forward layers`).
|
144 |
+
|
145 |
+
# 5. **Unique IDs:**
|
146 |
+
# * All block IDs, variable IDs, and list IDs must be unique strings (e.g., "myBlock123", "myVarId456", "myListId789"). Do NOT use placeholder strings like "block_id_here".
|
147 |
+
|
148 |
+
# 6. **No Nested `blocks` Dictionary:**
|
149 |
+
# * The `blocks` dictionary should only appear once per `target` (sprite/stage). Do NOT nest a `blocks` dictionary inside an individual block definition. Blocks that are part of a substack are linked via the `SUBSTACK` input.
|
150 |
+
|
151 |
+
# 7. **Asset Properties (for Costumes/Sounds):**
|
152 |
+
# * `assetId`, `md5ext`, `bitmapResolution`, `rotationCenterX`/`rotationCenterY` should be correctly associated with costume and sound objects within the `costumes` and `sounds` arrays.
|
153 |
+
|
154 |
+
# **General Principles and Important Considerations:**
|
155 |
+
# * **Backward Compatibility:** Adhere strictly to existing Scratch 3.0 opcodes and schema to ensure backward compatibility with older projects. [cite_start]Opcodes must remain consistent to prevent previously saved projects from failing to load or behaving unexpectedly[cite: 18, 19, 25, 65].
|
156 |
+
# * **Forgiving Inputs:** Recognize that Scratch is designed to be "forgiving in its interpretation of inputs." [cite_start]The Scratch VM handles potentially "invalid" inputs gracefully (e.g., converting a number to a string if expected, returning default values like zero or empty strings, or performing no action) rather than crashing[cite: 20, 21, 22, 38, 39, 41]. This implies that precise type matching for inputs might be handled internally by Scratch, allowing for some flexibility in how values are provided, but the agent should aim for the most common and logical type.
|
157 |
+
# """
|
158 |
+
|
159 |
+
# SYSTEM_PROMPT_JSON_CORRECTOR ="""
|
160 |
+
# You are an assistant that outputs JSON responses strictly following the given schema.
|
161 |
+
# If the JSON you produce has any formatting errors, missing required fields, or invalid structure, you must identify the problems and correct them.
|
162 |
+
# Always return only valid JSON that fully conforms to the schema below, enclosed in triple backticks (```), without any extra text or explanation.
|
163 |
+
|
164 |
+
# If you receive an invalid or incomplete JSON response, fix it by:
|
165 |
+
# - Adding any missing required fields with appropriate values.
|
166 |
+
# - Correcting syntax errors such as missing commas, brackets, or quotes.
|
167 |
+
# - Ensuring the JSON structure matches the schema exactly.
|
168 |
+
|
169 |
+
# Remember: Your output must be valid JSON only, ready to be parsed without errors.
|
170 |
+
# """
|
171 |
+
# # debugger and resolver agent for Scratch 3.0
|
172 |
+
# agent_json_resolver = create_react_agent(
|
173 |
+
# model=llm,
|
174 |
+
# tools=[], # No specific tools are defined here, but could be added later
|
175 |
+
# prompt=SYSTEM_PROMPT_JSON_CORRECTOR
|
176 |
+
# )
|
177 |
+
|
178 |
+
# # Helper function to load the block catalog from a JSON file
|
179 |
+
# def _load_block_catalog(file_path: str) -> Dict:
|
180 |
+
# """Loads the Scratch block catalog from a specified JSON file."""
|
181 |
+
# try:
|
182 |
+
# with open(file_path, 'r') as f:
|
183 |
+
# catalog = json.load(f)
|
184 |
+
# logger.info(f"Successfully loaded block catalog from {file_path}")
|
185 |
+
# return catalog
|
186 |
+
# except FileNotFoundError:
|
187 |
+
# logger.error(f"Error: Block catalog file not found at {file_path}")
|
188 |
+
# # Return an empty dict or raise an error, depending on desired behavior
|
189 |
+
# return {}
|
190 |
+
# except json.JSONDecodeError as e:
|
191 |
+
# logger.error(f"Error decoding JSON from {file_path}: {e}")
|
192 |
+
# return {}
|
193 |
+
# except Exception as e:
|
194 |
+
# logger.error(f"An unexpected error occurred while loading {file_path}: {e}")
|
195 |
+
# return {}
|
196 |
+
|
197 |
+
# # --- Global variable for the block catalog ---
|
198 |
+
# ALL_SCRATCH_BLOCKS_CATALOG = {}
|
199 |
+
# BLOCK_CATALOG_PATH = r"blocks\blocks.json" # Define the path to your JSON file
|
200 |
+
# HAT_BLOCKS_PATH = r"blocks\hat_blocks.json" # Path to the hat blocks JSON file
|
201 |
+
# STACK_BLOCKS_PATH = r"blocks\stack_blocks.json" # Path to the stack blocks JSON file
|
202 |
+
# REPORTER_BLOCKS_PATH = r"blocks\reporter_blocks.json" # Path to the reporter blocks JSON file
|
203 |
+
# BOOLEAN_BLOCKS_PATH = r"blocks\boolean_blocks.json" # Path to the boolean blocks JSON file
|
204 |
+
# C_BLOCKS_PATH = r"blocks\c_blocks.json" # Path to the C blocks JSON file
|
205 |
+
# CAP_BLOCKS_PATH = r"blocks\cap_blocks.json" # Path to the cap blocks JSON file
|
206 |
+
|
207 |
+
# # Load the block catalogs from their respective JSON files
|
208 |
+
# hat_block_data = _load_block_catalog(HAT_BLOCKS_PATH)
|
209 |
+
# hat_description = hat_block_data["description"]
|
210 |
+
# hat_opcodes_functionalities = os.path.join(HAT_BLOCKS_PATH, "hat_blocks.txt")
|
211 |
+
|
212 |
+
# boolean_block_data = _load_block_catalog(BOOLEAN_BLOCKS_PATH)
|
213 |
+
# boolean_description = boolean_block_data["description"]
|
214 |
+
# boolean_opcodes_functionalities = os.path.join(BOOLEAN_BLOCKS_PATH, "boolean_blocks.txt")
|
215 |
+
|
216 |
+
# c_block_data = _load_block_catalog(C_BLOCKS_PATH)
|
217 |
+
# c_description = c_block_data["description"]
|
218 |
+
# c_opcodes_functionalities = os.path.join(C_BLOCKS_PATH, "c_blocks.txt")
|
219 |
+
|
220 |
+
|
221 |
+
# cap_block_data = _load_block_catalog(CAP_BLOCKS_PATH)
|
222 |
+
# cap_description = cap_block_data["description"]
|
223 |
+
# cap_opcodes_functionalities = os.path.join(CAP_BLOCKS_PATH, "cap_blocks.txt")
|
224 |
+
|
225 |
+
# reporter_block_data = _load_block_catalog(REPORTER_BLOCKS_PATH)
|
226 |
+
# reporter_description = reporter_block_data["description"]
|
227 |
+
# reporter_opcodes_functionalities = os.path.join(REPORTER_BLOCKS_PATH, "reporter_blocks.txt")
|
228 |
+
|
229 |
+
# stack_block_data = _load_block_catalog(STACK_BLOCKS_PATH)
|
230 |
+
# stack_description = stack_block_data["description"]
|
231 |
+
# stack_opcodes_functionalities = os.path.join(STACK_BLOCKS_PATH, "stack_blocks.txt")
|
232 |
+
|
233 |
+
|
234 |
+
# # Helper function to extract JSON from LLM response
|
235 |
+
# def extract_json_from_llm_response(raw_response: str) -> dict:
|
236 |
+
# # --- 1) Pull out the JSON code‑block if present ---
|
237 |
+
# md = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", raw_response)
|
238 |
+
# json_string = md.group(1).strip() if md else raw_response
|
239 |
+
|
240 |
+
# # --- 2) Trim to the outermost { … } so we drop any prefix/suffix junk ---
|
241 |
+
# first, last = json_string.find('{'), json_string.rfind('}')
|
242 |
+
# if 0 <= first < last:
|
243 |
+
# json_string = json_string[first:last+1]
|
244 |
+
|
245 |
+
# # --- 3) PRE‑CLEANUP: remove stray assistant{…}, rogue assistant keys, fix boolean quotes ---
|
246 |
+
# json_string = re.sub(r'\b\w+\s*{', '{', json_string)
|
247 |
+
# json_string = re.sub(r'"assistant"\s*:', '', json_string)
|
248 |
+
# json_string = re.sub(r'\b(false|true)"', r'\1', json_string)
|
249 |
+
# logger.debug("Ran pre‑cleanup for stray tokens and boolean quotes.")
|
250 |
+
|
251 |
+
# # --- 3.1) Fix stray inner quotes at start of name/list values ---
|
252 |
+
# # e.g., { "name": " \"recent_scoress\"", ... } → "recent_scoress"
|
253 |
+
# json_string = re.sub(
|
254 |
+
# r'("name"\s*:\s*")\s*"',
|
255 |
+
# r'\1',
|
256 |
+
# json_string
|
257 |
+
# )
|
258 |
+
|
259 |
+
# # --- 4) Escape all embedded quotes in any `logic` value up to the next key ---
|
260 |
+
# def _esc(m):
|
261 |
+
# prefix, body = m.group(1), m.group(2)
|
262 |
+
# return prefix + body.replace('"', r'\"')
|
263 |
+
# json_string = re.sub(
|
264 |
+
# r'("logic"\s*:\s*")([\s\S]+?)(?=",\s*"[A-Za-z_]\w*"\s*:\s*)',
|
265 |
+
# _esc,
|
266 |
+
# json_string
|
267 |
+
# )
|
268 |
+
# logger.debug("Escaped embedded quotes in logic fields.")
|
269 |
+
|
270 |
+
# logger.debug("Quoted unquoted keys.")
|
271 |
+
|
272 |
+
# # --- 6) Remove trailing commas before } or ] ---
|
273 |
+
# json_string = re.sub(r',\s*(?=[}\],])', '', json_string)
|
274 |
+
# json_string = re.sub(r',\s*,', ',', json_string)
|
275 |
+
# logger.debug("Removed trailing commas.")
|
276 |
+
|
277 |
+
# # --- 7) Balance braces: drop extra } at end if needed ---
|
278 |
+
# ob, cb = json_string.count('{'), json_string.count('}')
|
279 |
+
# if cb > ob:
|
280 |
+
# excess = cb - ob
|
281 |
+
# json_string = json_string.rstrip()[:-excess]
|
282 |
+
# logger.debug(f"Stripped {excess} extra closing brace(s).")
|
283 |
+
|
284 |
+
# # --- 8) Escape literal newlines in *all* string values ---
|
285 |
+
# json_string = re.sub(
|
286 |
+
# r'"((?:[^"\\]|\\.)*?)"',
|
287 |
+
# lambda m: '"' + m.group(1).replace('\n', '\\n').replace('\r', '\\r') + '"',
|
288 |
+
# json_string,
|
289 |
+
# flags=re.DOTALL
|
290 |
+
# )
|
291 |
+
# logger.debug("Escaped newlines in strings.")
|
292 |
+
|
293 |
+
# # --- 9) Final parse attempt ---
|
294 |
+
# try:
|
295 |
+
# return json.loads(json_string)
|
296 |
+
# except json.JSONDecodeError:
|
297 |
+
# logger.error("Sanitized JSON still invalid:\n%s", json_string)
|
298 |
+
# raise
|
299 |
+
|
300 |
+
# # Main agent of the system agent for Scratch 3.0
|
301 |
+
# agent = create_react_agent(
|
302 |
+
# model=llm,
|
303 |
+
# tools=[], # No specific tools are defined here, but could be added later
|
304 |
+
# prompt=SYSTEM_PROMPT
|
305 |
+
# )
|
306 |
+
|
307 |
+
# # Node 6: Logic updating if any issue here
|
308 |
+
# def plan_logic_aligner_node(state: GameState):
|
309 |
+
# logger.info("--- Running plan_logic_aligner_node ---")
|
310 |
+
|
311 |
+
# image = state.get("image", "")
|
312 |
+
|
313 |
+
# refinement_prompt = f"""
|
314 |
+
# You are an expert in Scratch 3.0 game development, specializing in understanding block relationships (stacked, nested).
|
315 |
+
# "Analyze the Scratch code-block image and generate Pseudo-Code for what this logic appears to be doing."
|
316 |
+
# From Image, you also have to detect a value of Key given in Text form "Script for: ". Below is the example
|
317 |
+
# Example: "Script for: Bear", "Script for:" is a key and "Bear" is value.
|
318 |
+
# --- Scratch 3.0 Block Reference ---
|
319 |
+
# ### Hat Blocks
|
320 |
+
# Description: {hat_description}
|
321 |
+
# Blocks:
|
322 |
+
# {hat_opcodes_functionalities}
|
323 |
+
|
324 |
+
# ### Boolean Blocks
|
325 |
+
# Description: {boolean_description}
|
326 |
+
# Blocks:
|
327 |
+
# {boolean_opcodes_functionalities}
|
328 |
+
|
329 |
+
# ### C Blocks
|
330 |
+
# Description: {c_description}
|
331 |
+
# Blocks:
|
332 |
+
# {c_opcodes_functionalities}
|
333 |
+
|
334 |
+
# ### Cap Blocks
|
335 |
+
# Description: {cap_description}
|
336 |
+
# Blocks:
|
337 |
+
# {cap_opcodes_functionalities}
|
338 |
+
|
339 |
+
# ### Reporter Blocks
|
340 |
+
# Description: {reporter_description}
|
341 |
+
# Blocks:
|
342 |
+
# {reporter_opcodes_functionalities}
|
343 |
+
|
344 |
+
# ### Stack Blocks
|
345 |
+
# Description: {stack_description}
|
346 |
+
# Blocks:
|
347 |
+
# {stack_opcodes_functionalities}
|
348 |
+
# -----------------------------------
|
349 |
+
|
350 |
+
# Your task is to:
|
351 |
+
# If you don't find any "Code-Blocks" then,
|
352 |
+
# **Don't generate Pseudo Code, and pass the message "No Code-blocks" find...
|
353 |
+
# If you find any "Code-Blocks" then,
|
354 |
+
# 1. **Refine the 'logic'**: Make it precise, accurate, and fully aligned with the Game Description. Use Scratch‑consistent verbs and phrasing. **Do NOT** use raw double‑quotes inside the logic string.
|
355 |
+
|
356 |
+
# 2. **Structural requirements**:
|
357 |
+
# - **Numeric values** `(e.g., 0, 5, 0.2, -130)` **must** be in parentheses: `(0)`, `(5)`, `(0.2)`, `(-130)`.
|
358 |
+
# - **AlphaNumeric values** `(e.g., hello, say 5, 4, hi!)` **must** be in parentheses: `(hello)`, `(say 5)`, `(4)`, `(hi!)`.
|
359 |
+
# - **Variables** must be in the form `[variable v]` (e.g., `[score v]`), even when used inside expressions two example use `set [score v] to (1)` or `show variable ([speed v])`.
|
360 |
+
# - **Dropdown options** must be in the form `[option v]` (e.g., `[Game Start v]`, `[blue sky v]`). example use `when [space v] key pressed`.
|
361 |
+
# - **Reporter blocks** used as inputs must be double‑wrapped: `((x position))`, `((y position))`. example use `if <((y position)) = (-130)> then` or `(((x position)) * (1))`.
|
362 |
+
# - **Boolean blocks** in conditions must be inside `< >`, including nested ones: `<not <condition>>`, `<<cond1> and <cond2>>`,`<<cond1> or <cond2>>`.
|
363 |
+
# - **Other Boolean blocks** in conditions must be inside `< >`, including nested ones or values or variables: `<(block/value/variable) * (block/value/variable)>`,`<(block/value/variable) < (block/value/variable)>`, and example of another variable`<[apple v] contains [a v]?>`.
|
364 |
+
# - **Operator expressions** must use explicit Scratch operator blocks, e.g.:
|
365 |
+
# ```
|
366 |
+
# (([ballSpeed v]) * (1.1))
|
367 |
+
# ```
|
368 |
+
# - **Every hat block script must end** with a final `end` on its own line.
|
369 |
+
|
370 |
+
# 3. **Pseudo‑code formatting**:
|
371 |
+
# - Represent each block or nested block on its own line.
|
372 |
+
# - Indent nested blocks by 4 spaces under their parent (`forever`, `if`, etc.).
|
373 |
+
# - No comments or explanatory text—just the block sequence.
|
374 |
+
# - a natural language breakdown of each step taken after the event, formatted as a multi-line string representing pseudo-code. Ensure clarity and granularity—each described action should map closely to a Scratch block or tight sequence.
|
375 |
+
|
376 |
+
# 4. **Logic content**:
|
377 |
+
# - Build clear flow for mechanics (movement, jumping, flying, scoring, collisions).
|
378 |
+
# - Match each action closely to a Scratch block or tight sequence.
|
379 |
+
# - Do **NOT** include any justification or comments—only the raw logic.
|
380 |
+
|
381 |
+
# 5. **Examples for reference**:
|
382 |
+
# **Correct** pattern for a simple start script:
|
383 |
+
# ```
|
384 |
+
# when green flag clicked
|
385 |
+
# switch backdrop to [blue sky v]
|
386 |
+
# set [score v] to (0)
|
387 |
+
# show variable [score v]
|
388 |
+
# broadcast [Game Start v]
|
389 |
+
# end
|
390 |
+
# ```
|
391 |
+
# **Correct** pattern for updating the high score variable handling:
|
392 |
+
# ```
|
393 |
+
# when I receive [Game Over v]
|
394 |
+
# if <((score)) > (([High Score v]))> then
|
395 |
+
# set [High Score v] to ([score v])
|
396 |
+
# end
|
397 |
+
# switch backdrop to [Game Over v]
|
398 |
+
# end
|
399 |
+
# ```
|
400 |
+
# **Correct** pattern for level up and increase difficulty use:
|
401 |
+
# ```
|
402 |
+
# when I receive [Level Up v]
|
403 |
+
# change [level v] by (1)
|
404 |
+
# set [ballSpeed v] to ((([ballSpeed v]) * (1.1)))
|
405 |
+
# end
|
406 |
+
# ```
|
407 |
+
# **Correct** pattern for jumping mechanics use:
|
408 |
+
# ```
|
409 |
+
# when [space v] key pressed
|
410 |
+
# if <((y position)) = (-100)> then
|
411 |
+
# repeat (5)
|
412 |
+
# change y by (100)
|
413 |
+
# wait (0.1) seconds
|
414 |
+
# change y by (-100)
|
415 |
+
# wait (0.1) seconds
|
416 |
+
# end
|
417 |
+
# end
|
418 |
+
# end
|
419 |
+
# ```
|
420 |
+
# **Correct** pattern for continuos moving objects use:
|
421 |
+
# ```
|
422 |
+
# when green flag clicked
|
423 |
+
# go to x: (240) y: (-100)
|
424 |
+
# set [speed v] to (-5)
|
425 |
+
# show variable [speed v]
|
426 |
+
# forever
|
427 |
+
# change x by ([speed v])
|
428 |
+
# if <((x position)) < (-240)> then
|
429 |
+
# go to x: (240) y: (-100)
|
430 |
+
# end
|
431 |
+
# end
|
432 |
+
# end
|
433 |
+
# ```
|
434 |
+
# **Correct** pattern for continuos moving objects use:
|
435 |
+
# ```
|
436 |
+
# when green flag clicked
|
437 |
+
# go to x: (240) y: (-100)
|
438 |
+
# set [speed v] to (-5)
|
439 |
+
# show variable [speed v]
|
440 |
+
# forever
|
441 |
+
# change x by ([speed v])
|
442 |
+
# if <((x position)) < (-240)> then
|
443 |
+
# go to x: (240) y: (-100)
|
444 |
+
# end
|
445 |
+
# end
|
446 |
+
# end
|
447 |
+
# ```
|
448 |
+
# 6. **Donot** add any explaination of logic or comments to justify or explain just put the logic content in the json.
|
449 |
+
# 7. **Output**:
|
450 |
+
# Return **only** a JSON object, using double quotes everywhere:
|
451 |
+
# ```json
|
452 |
+
# {{
|
453 |
+
# "refined_logic":{{
|
454 |
+
# "name_variable": 'Value of "Sript for: "',
|
455 |
+
# "pseudocode":"…your fully‑formatted pseudo‑code here…",
|
456 |
+
# }}
|
457 |
+
# }}
|
458 |
+
# ```
|
459 |
+
# """
|
460 |
+
# image_input = {
|
461 |
+
# "type": "image_url",
|
462 |
+
# "image_url": {
|
463 |
+
# "url": f"data:image/png;base64,{image}"
|
464 |
+
# }
|
465 |
+
# }
|
466 |
+
|
467 |
+
# content = [
|
468 |
+
# {"type": "text", "text": refinement_prompt},
|
469 |
+
# image_input
|
470 |
+
# ]
|
471 |
+
|
472 |
+
# try:
|
473 |
+
# # Invoke the main agent for logic refinement and relationship identification
|
474 |
+
# response = agent.invoke({"messages": [{"role": "user", "content": content}]})
|
475 |
+
# llm_output_raw = response["messages"][-1].content.strip()
|
476 |
+
|
477 |
+
# parsed_llm_output = extract_json_from_llm_response(llm_output_raw)
|
478 |
+
|
479 |
+
# # result = parsed_llm_output
|
480 |
+
# # Extract needed values directly
|
481 |
+
# logic_data = parsed_llm_output.get("refined_logic", {})
|
482 |
+
# name_variable = logic_data.get("name_variable", "Unknown")
|
483 |
+
# pseudocode = logic_data.get("pseudocode", "No logic extracted")
|
484 |
+
|
485 |
+
# result = {"pseudo_node": {
|
486 |
+
# "name_variable": name_variable,
|
487 |
+
# "pseudocode": pseudocode
|
488 |
+
# }}
|
489 |
+
|
490 |
+
# print(f"result:\n\n {result}")
|
491 |
+
# return result
|
492 |
+
# except Exception as e:
|
493 |
+
# logger.error(f"❌ plan_logic_aligner_node failed: {str(e)}")
|
494 |
+
# return {"error": str(e)}
|
495 |
+
# except json.JSONDecodeError as error_json:
|
496 |
+
# # If JSON parsing fails, use the json resolver agent
|
497 |
+
# correction_prompt = (
|
498 |
+
# "Your task is to correct the provided JSON string to ensure it is **syntactically perfect and adheres strictly to JSON rules**.\n"
|
499 |
+
# "It must be a JSON object with `refined_logic` (string) and `block_relationships` (array of objects).\n"
|
500 |
+
# f"- **Error Details**: {error_json}\n\n"
|
501 |
+
# "**Strict Instructions for your response:**\n"
|
502 |
+
# "1. **ONLY** output the corrected JSON. Do not include any other text or explanations.\n"
|
503 |
+
# "2. Ensure all keys and string values are enclosed in **double quotes**. Escape internal quotes (`\\`).\n"
|
504 |
+
# "3. No trailing commas. Correct nesting.\n\n"
|
505 |
+
# "Here is the problematic JSON string to correct:\n"
|
506 |
+
# f"```json\n{llm_output_raw}\n```\n"
|
507 |
+
# "Corrected JSON:\n"
|
508 |
+
# )
|
509 |
+
# try:
|
510 |
+
# correction_response = agent_json_resolver.invoke({"messages": [{"role": "user", "content": correction_prompt}]})
|
511 |
+
# corrected_output = extract_json_from_llm_response(correction_response["messages"][-1].content)
|
512 |
+
# #block_relationships = corrected_output.get("block_relationships", [])
|
513 |
+
# result = {
|
514 |
+
# #"image_path": image_path,
|
515 |
+
# "pseudo_code": corrected_output
|
516 |
+
# }
|
517 |
+
|
518 |
+
# return result
|
519 |
+
|
520 |
+
# except Exception as e_corr:
|
521 |
+
# logger.error(f"Failed to correct JSON output for even after retry: {e_corr}")
|
522 |
+
|
523 |
+
# scratch_keywords = [
|
524 |
+
# "move", "turn", "wait", "repeat", "if", "else", "broadcast",
|
525 |
+
# "glide", "change", "forever", "when", "switch",
|
526 |
+
# "next costume", "set", "show", "hide", "play sound",
|
527 |
+
# "go to", "x position", "y position", "think", "say",
|
528 |
+
# "variable", "stop", "clone",
|
529 |
+
# "touching", "sensing", "pen", "clear","Scratch","Code","scratch blocks"
|
530 |
+
# ]
|
531 |
+
|
532 |
+
# filtered_sprites = {}
|
533 |
+
# Prepare manipulated sprite JSON structure
|
534 |
+
# Module-level accumulators.
# NOTE(review): manipulated_json is mutated as a global by
# extract_images_from_pdf — not thread-safe under a multi-worker Flask
# deployment; confirm single-worker usage or move state into the request.
manipulated_json = {}
# img_elements is only populated by code that is currently commented out.
img_elements = []
|
536 |
+
|
537 |
+
# @log_execution_time
|
538 |
+
# --- FUNCTION: Extract images from saved PDF ---
|
539 |
+
def extract_images_from_pdf(pdf_stream: io.BytesIO):
    """Extract embedded images from a PDF and build a structured sprite dict.

    Args:
        pdf_stream: Either an in-memory PDF (``io.BytesIO``) or a filesystem
            path string to a PDF file.

    Returns:
        dict: ``{"Sprite 1": {"base64": <image_base64>, "file-path": <pdf_id>}, ...}``
        where ``pdf_id`` is a random hex id for in-memory input or the PDF's
        base filename (without extension) for path input.

    Raises:
        RuntimeError: If PDF partitioning or image extraction fails.
    """
    try:
        # Derive a stable identifier for this PDF.
        if isinstance(pdf_stream, io.BytesIO):
            pdf_id = uuid.uuid4().hex  # no filename available for a stream
        else:
            pdf_id = os.path.splitext(os.path.basename(pdf_stream))[0]

        try:
            # "hi_res" strategy + yolox layout model so embedded images are
            # detected; extract_image_block_to_payload=True puts the image
            # bytes into each element's metadata as base64.
            elements = partition_pdf(
                file=pdf_stream,
                strategy="hi_res",
                extract_image_block_types=["Image"],
                hi_res_model_name="yolox",
                extract_image_block_to_payload=True,
            )
        except Exception as e:
            raise RuntimeError(f"❌ Failed to extract images from PDF: {str(e)}")

        # Build the result locally so repeated calls don't leak sprites from
        # previously processed PDFs (the module-level dict used to accumulate
        # entries across calls).
        extracted = {}
        sprite_count = 1
        for el in (element.to_dict() for element in elements):
            img_b64 = el["metadata"].get("image_base64")
            if not img_b64:
                # Skip non-image elements (text, tables, ...).
                continue
            extracted[f"Sprite {sprite_count}"] = {
                "base64": img_b64,
                "file-path": pdf_id,
            }
            sprite_count += 1

        # Keep the module-level view in sync for any legacy readers of the
        # global, but without carrying over stale entries.
        manipulated_json.clear()
        manipulated_json.update(extracted)
        return manipulated_json
    except RuntimeError:
        # Already wrapped with a descriptive message — don't double-wrap.
        raise
    except Exception as e:
        raise RuntimeError(f"❌ Error in extract_images_from_pdf: {str(e)}")
|
619 |
+
|
620 |
+
# @log_execution_time
|
621 |
+
def similarity_matching(sprites_data: str, project_folder:str) -> str:
    """Match extracted sprite images to a fixed reference-image library via
    CLIP embeddings, copy matched sprite/backdrop assets into
    *project_folder*, and write the merged Scratch ``project.json``.

    Args:
        sprites_data: mapping "Sprite N" -> {"base64": ..., "file-path": ...}.
            (Annotated as ``str`` but iterated with ``.items()`` — it is a
            dict in practice; see the caller in process_pdf.)
        project_folder: destination directory for the generated project.

    Returns:
        Path of the written ``project.json``.
    """
    logger.info("🔍 Running similarity matching...")

    project_json_path = os.path.join(project_folder, "project.json")

    # ---- Collect sprite ids and their base64 image payloads ---- #
    # NOTE: `texts` is never populated; kept as-is for now.
    sprite_ids, texts, sprite_base64 = [], [], []
    for sid, sprite in sprites_data.items():
        sprite_ids.append(sid)
        sprite_base64.append(sprite["base64"])

    # Decode each base64 sprite into an in-memory RGB PNG for embedding.
    # split(",")[-1] strips an optional "data:image/...;base64," prefix.
    sprite_images_bytes = []
    for b64 in sprite_base64:
        img = Image.open(BytesIO(base64.b64decode(b64.split(",")[-1]))).convert("RGB")
        buffer = BytesIO()
        img.save(buffer, format="PNG")
        buffer.seek(0)
        sprite_images_bytes.append(buffer)

    # ---- Reference image library ---- #
    # NOTE(review): hard-coded absolute Windows paths — not portable; the
    # ORDER of this list must match the order of entries in embeddings.json.
    folder_image_paths = ['E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\bedroom\\8cc0b88d53345b3e337e8f028a32a4e7.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\baseball\\7be1f5b3e682813dac1f297e52ff7dca.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\beach_malibu\\050615fe992a00d6af0e664e497ebf53.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\castle\\951765ee7f7370f120c9df20b577c22f.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\hall\\ea86ca30b346f27ca5faf1254f6a31e3.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\jungle\\f4f908da19e2753f3ed679d7b37650ca.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Batter\\baseball_sprite_motion_1.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Bear\\bear_motion_2.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Beetle\\46d0dfd4ae7e9bfe3a6a2e35a4905eae.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\cat\\cat_motion_1.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Centaur\\2373556e776cad3ba4d6ee04fc34550b.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Crab\\bear_element.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Soccer Ball\\cat_football.png',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\code_blocks\\script1.jpg',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\code_blocks\\script2.jpg',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\code_blocks\\script3.jpg',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\code_blocks\\script4.jpg',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\code_blocks\\script5.jpg',
                          'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\code_blocks\\script6.jpg' ]

    # ---- Embed the uploaded sprite images with CLIP ---- #
    sprite_features = clip_embd.embed_image(sprite_images_bytes)

    # ---- Load precomputed reference embeddings ---- #
    # Assumes embeddings.json entries align 1:1 (by index) with
    # folder_image_paths above — TODO confirm when regenerating embeddings.
    ref_embedding_path = f"{OUTPUT_FOLDER}/embeddings.json"
    with open(ref_embedding_path, "r") as f:
        embedding_json = json.load(f)

    img_matrix = np.array([img["embeddings"] for img in embedding_json])
    sprite_matrix = np.array(sprite_features)

    # Dot-product similarity; pick the best reference image per sprite.
    similarity = np.matmul(sprite_matrix, img_matrix.T)
    most_similar_indices = np.argmax(similarity, axis=1)
    sprite_base_path = r'E:\Pratham\2025\Harsh Sir\Scratch Vision\images\sprites'
    # ============= Match and copy ===============
    project_data = []    # loaded "sprite.json" contents for the final "project.json"
    copied_folders = set()    # prevents copying the same sprite folder more than once

    # =============================================================== #
    #   Loop through most similar images from Sprites folder          #
    #   → Copy sprite assets (excluding matched image + sprite.json)  #
    #   → Load sprite.json and append its data to project_data        #
    # =============================================================== #
    # sprite_idx: index of sprite we are processing
    # matched_idx: index of the reference image with the highest similarity
    for sprite_idx, matched_idx in enumerate(most_similar_indices):
        matched_image_path = folder_image_paths[matched_idx]
        print(f"------------ folder image paths: \n {folder_image_paths[matched_idx]}\n")
        matched_image_path = os.path.normpath(matched_image_path)

        matched_folder = os.path.dirname(matched_image_path)
        print(f"\nMATCHED_FOLDER: {matched_folder}\n")
        # Only sprite matches are handled here; backdrops handled below.
        if not matched_folder.startswith(os.path.normpath(sprite_base_path)):
            continue

        folder_name = os.path.basename(matched_folder)
        print(f"FOLDER NAME: {folder_name}")
        print(f"================COPIED FOLDER: \n {copied_folders}\n")
        if matched_folder in copied_folders:
            continue
        copied_folders.add(matched_folder)
        logger.info(f"Matched sprites: {matched_image_path}")

        sprite_json_path = os.path.join(matched_folder, 'sprite.json')
        if not os.path.exists(sprite_json_path):
            logger.warning(f"sprite.json not found in: {matched_folder}")
            continue

        with open(sprite_json_path, 'r') as f:
            sprite_data = json.load(f)

        # Copy only non-matched files (costumes/sounds); the matched image
        # and the manifest itself are skipped.
        for fname in os.listdir(matched_folder):
            fpath = os.path.join(matched_folder, fname)
            if os.path.isfile(fpath) and fname not in {os.path.basename(matched_image_path), 'sprite.json'}:
                shutil.copy2(fpath, os.path.join(project_folder, fname))
        project_data.append(sprite_data)

    # ================================================================== #
    #   Loop through most similar images from Backdrops folder           #
    #   → Copy matched image + other backdrop assets into project_folder #
    #   → Load project.json and append its stage target to backdrop_data #
    # ================================================================== #
    backdrop_data = []              # stage targets collected from matched backdrops
    copied_backdrop_folders = set() # prevent duplicate backdrops

    # make sure backdrop_base_path is normalized
    backdrop_base_path = os.path.normpath(r'E:\Pratham\2025\Harsh Sir\Scratch Vision\images\Backdrops')

    for backdrop_idx, matched_idx in enumerate(most_similar_indices):
        matched_image_path = os.path.normpath(folder_image_paths[matched_idx])

        # only handle backdrops
        if not matched_image_path.startswith(backdrop_base_path):
            continue

        matched_folder = os.path.dirname(matched_image_path)
        # skip if backdrop folder already processed
        if matched_folder in copied_backdrop_folders:
            continue
        copied_backdrop_folders.add(matched_folder)

        matched_filename = os.path.basename(matched_image_path)
        logger.info(f"Backdrop matched image: {matched_image_path}")

        # 1) Copy the matched backdrop image itself
        try:
            shutil.copy2(
                matched_image_path,
                os.path.join(project_folder, matched_filename)
            )
            logger.info(f"✅ Copied matched backdrop image {matched_filename} to {project_folder}")
        except Exception as e:
            logger.error(f"❌ Failed to copy matched backdrop {matched_image_path}: {e}")

        # 2) Copy other non-matched files (e.g. extra costumes) into project_folder
        for fname in os.listdir(matched_folder):
            if fname in {matched_filename, 'project.json'}:
                continue
            src = os.path.join(matched_folder, fname)
            dst = os.path.join(project_folder, fname)
            if os.path.isfile(src):
                try:
                    shutil.copy2(src, dst)
                    logger.info(f"Copied additional backdrop asset {fname} to project folder")
                except Exception as e:
                    logger.error(f"Failed to copy {src}: {e}")

        # 3) Load and append the Stage target from this backdrop's project.json
        backdrop_json_path = os.path.join(matched_folder, 'project.json')
        if os.path.exists(backdrop_json_path):
            with open(backdrop_json_path, 'r') as f:
                backdrop_json_data = json.load(f)
            for target in backdrop_json_data.get("targets", []):
                if target.get("isStage"):
                    backdrop_data.append(target)
        else:
            logger.warning(f"project.json not found in: {matched_folder}")

    # ---- Merge everything into the final Scratch 3 project skeleton ---- #
    final_project = {
        "targets": [],
        "monitors": [],
        "extensions": [],
        "meta": {
            "semver": "3.0.0",
            "vm": "11.3.0",
            "agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36 Edg/138.0.0.0"
        }
    }

    # Non-stage sprites first; the single Stage target is appended below.
    for sprite in project_data:
        if not sprite.get("isStage", False):
            final_project["targets"].append(sprite)

    if backdrop_data:
        # Fold every matched backdrop's costumes into one Stage target;
        # sounds are taken from the first matched backdrop only.
        all_costumes, sounds = [], []
        for idx, bd in enumerate(backdrop_data):
            all_costumes.extend(bd.get("costumes", []))
            if idx == 0 and "sounds" in bd:
                sounds = bd["sounds"]
        final_project["targets"].append({
            "isStage": True,
            "name": "Stage",
            "objName": "Stage",
            "variables": { "`jEk@4|i[#Fk?(8x)AV.-my variable": ["my variable", 0] },
            "lists": {},
            "broadcasts": {},
            "blocks": {},
            "comments": {},
            "currentCostume": 1 if len(all_costumes) > 1 else 0,
            "costumes": all_costumes,
            "sounds": sounds,
            "volume": 100,
            "layerOrder": 0,
            "tempo": 60,
            "videoTransparency": 50,
            "videoState": "on",
            "textToSpeechLanguage": None
        })
    else:
        # No backdrop matched: fall back to a bundled plain-white backdrop
        # and the standard "pop" sound.
        logger.warning("⚠️ No backdrop matched. Using default static backdrop.")
        default_backdrop_path = os.path.normpath(r"E:\Pratham\2025\Harsh Sir\Scratch Vision\images\plain_white.svg")
        default_backdrop_name = os.path.basename(default_backdrop_path)

        default_backdrop_sound = os.path.normpath(r"E:\Pratham\2025\Harsh Sir\Scratch Vision\images\83a9787d4cb6f3b7632b4ddfebf74367.wav")
        default_backdrop_sound_name = os.path.basename(default_backdrop_sound)

        try:
            shutil.copy2(default_backdrop_path, os.path.join(project_folder, default_backdrop_name))
            logger.info(f"✅ Default backdrop copied to project: {default_backdrop_name}")

            shutil.copy2(default_backdrop_sound, os.path.join(project_folder, default_backdrop_sound_name))
            logger.info(f"✅ Default backdrop sound copied to project: {default_backdrop_sound_name}")

        except Exception as e:
            logger.error(f"❌ Failed to copy default backdrop assets: {e}")

        final_project["targets"].append({
            "isStage": True,
            "name": "Stage",
            "objName": "Stage",
            "variables": {},
            "lists": {},
            "broadcasts": {},
            "blocks": {},
            "comments": {},
            "currentCostume": 0,
            "costumes": [
                {
                    # NOTE(review): dataFormat says "png" for an .svg asset —
                    # looks inconsistent; verify against the Scratch VM loader.
                    "assetId": default_backdrop_name.split(".")[0],
                    "name": "defaultBackdrop",
                    "md5ext": default_backdrop_name,
                    "dataFormat": "png",
                    "rotationCenterX": 240,
                    "rotationCenterY": 180
                }
            ],
            "sounds": [
                {
                    "name": "pop",
                    "assetId": "83a9787d4cb6f3b7632b4ddfebf74367",
                    "dataFormat": "wav",
                    "format": "",
                    "rate": 48000,
                    "sampleCount": 1123,
                    "md5ext": "83a9787d4cb6f3b7632b4ddfebf74367.wav"
                }
            ],
            "volume": 100,
            "layerOrder": 0,
            "tempo": 60,
            "videoTransparency": 50,
            "videoState": "on",
            "textToSpeechLanguage": None
        })

    with open(project_json_path, 'w') as f:
        json.dump(final_project, f, indent=2)

    # Post-pass: make sure the sprite the action plan targets is actually
    # present in the project; copies its assets in if it is missing.
    validate_and_copy_assets(
        action_planner_path=r"E:\Pratham\2025\Harsh Sir\Scratch Vision\action_plan.json",
        project_json_path=project_json_path,
        project_folder=project_folder,
        sprite_base_path=sprite_base_path,
        backdrop_base_path=backdrop_base_path,
        final_project = final_project
    )
    return project_json_path
|
947 |
+
|
948 |
+
def validate_and_copy_assets(action_planner_path, project_json_path, project_folder,sprite_base_path, backdrop_base_path, final_project):
    """Ensure the sprite/backdrop named by the action plan exists in the
    generated project; if missing, locate its assets in the reference
    folders, copy them into *project_folder*, and merge the target into
    *final_project*, rewriting ``project.json``.

    Args:
        action_planner_path: path to action_plan.json; its SECOND top-level
            key (the first after "Stage") names the sprite the scripts target.
        project_json_path: path of the project.json to validate/rewrite.
        project_folder: destination directory for copied assets.
        sprite_base_path: root of the reference sprite library.
        backdrop_base_path: root of the reference backdrop library.
        final_project: in-memory project dict to merge found targets into.
    """
    # Step 1: load action_plan.json and get the first sprite name after "Stage".
    with open(action_planner_path, 'r', encoding='utf-8') as f:
        planner_data = json.load(f)

    script_for = None
    # Relies on dict insertion order (Python 3.7+): keys[0] is "Stage".
    keys = list(planner_data.keys())
    if len(keys) > 1:
        script_for = keys[1]
    else:
        print("No sprite name found after 'Stage'")
        return

    print(f"\n\n=================script_for = '{script_for}'\n\n")

    # Step 2: check in project.json if the sprite already exists (case-insensitive).
    sprite_found = False
    with open(project_json_path, 'r', encoding='utf-8') as f:
        project_data = json.load(f)

    for target in project_data.get("targets", []):
        if target.get("name", "").lower() == script_for.lower():
            sprite_found = True
            break

    if sprite_found:
        print(f"'{script_for}' found in project.json – no asset copy needed.")
        return

    # Step 3: search the reference sprite and backdrop libraries.
    base_dirs = [sprite_base_path, backdrop_base_path]
    found_sprites = []
    found_backdrops = []

    # Search sprites: each sprite folder carries a sprite.json manifest.
    if os.path.exists(sprite_base_path):
        for sub_dir in os.listdir(sprite_base_path):
            folder_path = os.path.join(sprite_base_path, sub_dir)
            sprite_json_path = os.path.join(folder_path, "sprite.json")
            if os.path.isdir(folder_path) and os.path.exists(sprite_json_path):
                with open(sprite_json_path, 'r', encoding='utf-8') as f:
                    sprite_info = json.load(f)

                if sprite_info.get("name", "").lower() == script_for.lower():
                    print(f"Found matching sprite in {sprite_json_path}")
                    # Copy all assets except the JSON manifest itself.
                    for fname in os.listdir(folder_path):
                        if fname=='sprite.json':
                            continue
                        shutil.copy2(os.path.join(folder_path, fname), os.path.join(project_folder, fname))
                    found_sprites.append(sprite_info)
                    break

    # Search backdrops: each backdrop folder carries its own project.json.
    if os.path.exists(backdrop_base_path):
        for sub_dir in os.listdir(backdrop_base_path):
            folder_path = os.path.join(backdrop_base_path, sub_dir)
            proj_json_path = os.path.join(folder_path, "project.json")
            if os.path.isdir(folder_path) and os.path.exists(proj_json_path):
                with open(proj_json_path, 'r', encoding='utf-8') as f:
                    bd_json = json.load(f)
                for tgt in bd_json.get("targets", []):
                    if tgt.get("name", "").lower() == script_for.lower():
                        print(f"Found matching backdrop in {proj_json_path}")
                        # Copy all assets except JSON
                        for fname in os.listdir(folder_path):
                            if fname == "project.json":
                                continue
                            shutil.copy2(os.path.join(folder_path, fname),
                                         os.path.join(project_folder, fname))
                        found_backdrops.append(tgt)
                        break
    # Merge into final project.json: sprites are appended; a found Stage
    # target is inserted at index 0 (Scratch expects the stage first).
    for spr in found_sprites:
        if not spr.get("isStage", False):
            final_project["targets"].append(spr)
    for bd in found_backdrops:
        if bd.get("isStage", False):
            final_project["targets"].insert(0, bd)

    with open(project_json_path, 'w', encoding='utf-8') as f:
        json.dump(final_project, f, indent=2)

    if found_sprites or found_backdrops:
        print(f"✅ Updated {project_json_path} with missing '{script_for}' assets.")
    else:
        print(f"⚠️ No matching '{script_for}' assets found in sprites/backdrops.")
|
1038 |
+
|
1039 |
+
# --- ASYNC PDF to Image Conversion ---
|
1040 |
+
async def convert_pdf_to_images_async(pdf_stream: io.BytesIO, dpi=150):
    """Render a PDF held in *pdf_stream* to per-page PNG buffers without
    blocking the event loop.

    The poppler-backed conversion is CPU/IO heavy, so it runs in the default
    thread-pool executor while this coroutine awaits the result.

    Args:
        pdf_stream: in-memory PDF document.
        dpi: render resolution forwarded to poppler.

    Returns:
        list[BytesIO]: one rewound PNG buffer per page
        (see convert_bytes_to_image).
    """
    # FIX: asyncio.get_event_loop() inside a coroutine is deprecated since
    # Python 3.10; get_running_loop() is the correct call here and behaves
    # identically when a loop is running (which is guaranteed in a coroutine).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, lambda: convert_bytes_to_image(pdf_stream.getvalue(), dpi))
|
1043 |
+
|
1044 |
+
# pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
|
1045 |
+
# output_image_folder = os.path.join(IMAGE_FOLDER_PATH, pdf_name)
|
1046 |
+
# loop = asyncio.get_event_loop()
|
1047 |
+
# with ThreadPoolExecutor() as pool:
|
1048 |
+
# # Pass poppler_path explicitly
|
1049 |
+
# result = await loop.run_in_executor(
|
1050 |
+
# pool, convert_pdf_to_images_sync, pdf_path, output_image_folder, dpi, poppler_path
|
1051 |
+
# )
|
1052 |
+
# return result
|
1053 |
+
|
1054 |
+
def convert_bytes_to_image(pdf_bytes: bytes, dpi: int):
    """Render every page of a PDF (raw bytes) to an in-memory PNG buffer.

    Returns a list of BytesIO objects, one per page, each rewound to
    position 0 and holding PNG-encoded image data.
    """
    pages = convert_from_bytes(pdf_bytes, dpi=dpi, poppler_path=poppler_path)

    def _encode_page(page):
        # Serialize one rendered page as PNG into a fresh, rewound buffer.
        stream = BytesIO()
        page.save(stream, format="PNG")
        stream.seek(0)
        return stream

    return [_encode_page(page) for page in pages]
|
1064 |
+
|
1065 |
+
# # Blocking version used internally
|
1066 |
+
# def convert_pdf_to_images_sync(pdf_path, output_image_folder, dpi, poppler_path):
|
1067 |
+
# pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
|
1068 |
+
# output_image_folder = os.path.join("outputs", "SCANNED_IMAGE", pdf_name)
|
1069 |
+
# os.makedirs(output_image_folder, exist_ok=True)
|
1070 |
+
|
1071 |
+
# print(f"[INFO] Converting PDF: {pdf_path}")
|
1072 |
+
# print(f"[INFO] Output folder: {output_image_folder}")
|
1073 |
+
# # print(f"[INFO] Using Poppler path: {poppler_path}")
|
1074 |
+
# try:
|
1075 |
+
# images = convert_from_path(pdf_path, dpi=dpi, poppler_path=poppler_path)
|
1076 |
+
# image_paths = []
|
1077 |
+
# for i, img in enumerate(images):
|
1078 |
+
# output_path = os.path.join(output_image_folder, f"page_{i+1}.png")
|
1079 |
+
# img.save(output_path, "PNG")
|
1080 |
+
# print(f"[DEBUG] Saved: {output_path}")
|
1081 |
+
# image_paths.append(output_path)
|
1082 |
+
# return image_paths
|
1083 |
+
# except PDFInfoNotInstalledError as e:
|
1084 |
+
# raise RuntimeError(f"Poppler not installed or path incorrect: {str(e)}")
|
1085 |
+
# except Exception as e:
|
1086 |
+
# print(f"[ERROR] Failed to convert PDF: {e}")
|
1087 |
+
# raise
|
1088 |
+
|
1089 |
+
# def delay_for_tpm_node(state: GameState):
|
1090 |
+
# logger.info("--- Running DelayForTPMNode ---")
|
1091 |
+
# time.sleep(10) # Adjust the delay as needed
|
1092 |
+
# logger.info("Delay completed.")
|
1093 |
+
# return state
|
1094 |
+
|
1095 |
+
# # Build the LangGraph workflow
|
1096 |
+
# workflow = StateGraph(GameState)
|
1097 |
+
|
1098 |
+
# # Add all nodes to the workflow
|
1099 |
+
# workflow.add_node("timer_delay",delay_for_tpm_node)
|
1100 |
+
# workflow.add_node("opcode_counter", plan_logic_aligner_node)
|
1101 |
+
# workflow.set_entry_point("timer_delay")
|
1102 |
+
# workflow.add_edge("timer_delay","opcode_counter")
|
1103 |
+
# workflow.add_edge("opcode_counter", END)
|
1104 |
+
# app_graph = workflow.compile()
|
1105 |
+
|
1106 |
+
# def get_desc_pseudo(image_buf: BytesIO, pseudo_store: dict, project_folder: str):
|
1107 |
+
# """
|
1108 |
+
# Takes a path to a code-block image and returns a dict with:
|
1109 |
+
# - 'pseudo_code': pseudo-code representing logic in Scratch block format
|
1110 |
+
# Stores the output into outputs/pseudo_output.json
|
1111 |
+
# """
|
1112 |
+
# try:
|
1113 |
+
# # Load image and encode to base64
|
1114 |
+
# # with open(image_buf, "rb") as image_file:
|
1115 |
+
# # image_bytes = image_file.read()
|
1116 |
+
# img_base64 = base64.b64encode(image_buf.getvalue()).decode()
|
1117 |
+
# # pseudo_store[image_key] = pseudo
|
1118 |
+
|
1119 |
+
# # === Call LangGraph workflow (auto triggers plan_logic_aligner_node) ===
|
1120 |
+
# state = app_graph.invoke({"image": img_base64})
|
1121 |
+
# # { comment: to solve pseudo_output issue
|
1122 |
+
# # logic_refined = state.get("pseudo_node", {}).get("refined_logic", {})
|
1123 |
+
# pseudo_node = state.get("pseudo_node", {})
|
1124 |
+
|
1125 |
+
# name_variable = pseudo_node.get("name_variable", "Unknown")
|
1126 |
+
# pseudocode = pseudo_node.get("pseudocode", "No logic extracted")
|
1127 |
+
|
1128 |
+
# pseudo_store.setdefault(name_variable, []).append({"pseudo_code": pseudocode})
|
1129 |
+
|
1130 |
+
# # --- Extract fields ---
|
1131 |
+
# # refined = logic_refined.get("refined_logic", {})
|
1132 |
+
# # {comment: to solve pseudo_output.json issue}
|
1133 |
+
# # name_variable = logic_refined.get("name_variable", "Unknown")
|
1134 |
+
# # pseudo_code_raw = logic_refined.get("pseudocode", "No logic extracted")
|
1135 |
+
# # if pseudo_store is not None:
|
1136 |
+
# # pseudo_store.setdefault(name_variable, []).append({"pseudo_code": pseudo_code_raw})
|
1137 |
+
# #}
|
1138 |
+
# with open(os.path.join(project_folder, "pseudo_output.json"), "w") as f:
|
1139 |
+
# json.dump(pseudo_store, f, indent=2)
|
1140 |
+
|
1141 |
+
# result = {
|
1142 |
+
# "name_variable": name_variable,
|
1143 |
+
# "pseudo_code": pseudocode
|
1144 |
+
# }
|
1145 |
+
# return result
|
1146 |
+
# except Exception as e:
|
1147 |
+
# logger.error(f"❌ get_desc_pseudo failed for {image_buf}: {e}")
|
1148 |
+
# return {
|
1149 |
+
# "error": str(e)
|
1150 |
+
# }
|
1151 |
+
|
1152 |
+
# ============== Helper function to Upscale an Image ============== #
|
1153 |
+
def upscale_image(image: Image.Image, scale: int = 2) -> Image.Image:
    """Return *image* enlarged *scale* times in both dimensions (LANCZOS).

    Best-effort: if resizing fails for any reason, the error is logged and
    the original image is returned unchanged rather than raising.
    """
    try:
        w, h = image.size
        target = (w * scale, h * scale)
        enlarged = image.resize(target, Image.LANCZOS)
        logger.info(f"✅ Upscaled image to {target}")
        return enlarged
    except Exception as e:
        logger.error(f"❌ Error during image upscaling: {str(e)}")
        return image
|
1166 |
+
|
1167 |
+
@app.route('/')
def index():
    """Serve the application's landing page."""
    page = render_template('app_index.html')
    return page
|
1170 |
+
|
1171 |
+
# API endpoint
|
1172 |
+
@app.route('/process_pdf', methods=['POST'])
async def process_pdf():
    """Process an uploaded PDF (form-data key ``pdf_file``).

    Pipeline: extract sprite images from the PDF, run similarity matching
    to assemble a Scratch project in a fresh per-request folder, and render
    each PDF page to a base64-encoded PNG for the response.

    Returns:
        200 JSON {"message", "project_output_json", "scanned_images"} on success;
        400 JSON on a missing/empty upload;
        500 JSON on any processing failure.
    """
    start_time = time.time()
    try:
        logger.info("Received request to process PDF.")
        if 'pdf_file' not in request.files:
            logger.warning("No PDF file found in request.")
            return jsonify({"error": "Missing PDF file in form-data with key 'pdf_file'"}), 400

        pdf_file = request.files['pdf_file']
        if pdf_file.filename == '':
            return jsonify({"error": "Empty filename"}), 400

        # ================================================= #
        #   Generate Random UUID for project folder name    #
        # ================================================= #
        # Unique per-request folder so concurrent uploads never collide.
        random_id = str(uuid.uuid4()).replace('-', '')
        project_folder = os.path.join("outputs", f"project_{random_id}")
        os.makedirs(project_folder, exist_ok=True)

        # Keep the upload fully in memory; downstream steps read this stream.
        pdf_bytes = pdf_file.read()
        pdf_stream = io.BytesIO(pdf_bytes)
        logger.info(f"Saved uploaded PDF to: {pdf_stream}")

        # Extract sprite candidates, then build project.json via matching.
        manipulated_sprites = extract_images_from_pdf(pdf_stream)
        project_output = similarity_matching(manipulated_sprites, project_folder)

        # Render each PDF page to a PNG buffer off the event loop.
        # FIX: image_paths must be defined even when conversion fails;
        # previously a failure here caused a NameError in the base64 step
        # below instead of a graceful empty scanned_images list.
        image_paths = []
        try:
            image_paths = await convert_pdf_to_images_async(pdf_stream)
            print("PDF converted to images:", image_paths)
        except Exception as e:
            print(f"Error processing PDF: {e}")

        # Convert in-memory images to base64 strings for the JSON response.
        scanned_images_b64 = [
            base64.b64encode(buf.getvalue()).decode("utf-8")
            for buf in image_paths
        ]
        total_time = time.time() - start_time  # ⏳ End timer
        logger.info(f"⏱ Total processing time for PDF: {total_time:.2f} seconds")
        return jsonify({
            "message": "✅ PDF processed successfully",
            "project_output_json": project_output,
            "scanned_images": scanned_images_b64,
        })
    except Exception as e:
        logger.exception("❌ Failed to process PDF")
        return jsonify({"error": f"❌ Failed to process PDF: {str(e)}"}), 500
|
1246 |
+
|
1247 |
+
if __name__ == '__main__':
    # Bind on all interfaces; port 7860 is the Hugging Face Spaces convention.
    # NOTE(review): debug=True enables the Werkzeug reloader/debugger —
    # should be disabled for any non-development deployment.
    app.run(host='0.0.0.0', port=7860, debug=True)
|
app_main copy.py
ADDED
@@ -0,0 +1,1400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from flask import Flask, render_template, Response, flash, redirect, url_for, request, jsonify
|
2 |
+
import cv2, json,base64,io,os,tempfile,torch,logging, re
|
3 |
+
import numpy as np
|
4 |
+
from unstructured.partition.pdf import partition_pdf
|
5 |
+
from PIL import Image, ImageEnhance, ImageDraw
|
6 |
+
# from imutils.perspective import four_point_transform
|
7 |
+
from dotenv import load_dotenv
|
8 |
+
import pytesseract
|
9 |
+
# from transformers import AutoProcessor, AutoModelForImageTextToText, AutoModelForVision2Seq
|
10 |
+
from langchain_community.document_loaders.image_captions import ImageCaptionLoader
|
11 |
+
from werkzeug.utils import secure_filename
|
12 |
+
from langchain_groq import ChatGroq
|
13 |
+
from langgraph.prebuilt import create_react_agent
|
14 |
+
from pdf2image import convert_from_path
|
15 |
+
import asyncio
|
16 |
+
from concurrent.futures import ThreadPoolExecutor
|
17 |
+
from pdf2image.exceptions import PDFInfoNotInstalledError
|
18 |
+
from typing import Dict, TypedDict, Optional, Any
|
19 |
+
from langgraph.graph import StateGraph, END
|
20 |
+
import uuid
|
21 |
+
import shutil, time
|
22 |
+
from langchain_experimental.open_clip.open_clip import OpenCLIPEmbeddings
|
23 |
+
# from matplotlib.offsetbox import OffsetImage, AnnotationBbox
|
24 |
+
from io import BytesIO
|
25 |
+
|
26 |
+
# ============================== #
#    INITIALIZE CLIP EMBEDDER    #
# ============================== #
clip_embd = OpenCLIPEmbeddings()

# Configure logging: everything goes both to app.log and to the console.
logging.basicConfig(
    level=logging.DEBUG,  # Use INFO or ERROR in production
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.FileHandler("app.log"),
        logging.StreamHandler()
    ]
)

logger = logging.getLogger(__name__)

# Load GROQ_API_KEY (and any other secrets) from a local .env file, if present.
load_dotenv()
groq_api_key = os.getenv("GROQ_API_KEY")

llm = ChatGroq(
    model="meta-llama/llama-4-scout-17b-16e-instruct",
    temperature=0,
    max_tokens=None,
)

app = Flask(__name__)

# External tool locations. The hard-coded Windows paths only exist on the
# original development machine; guard them so Linux deployments (e.g. this
# Hugging Face Space) fall back to whatever `tesseract` / poppler binaries
# are available on PATH instead of pointing at a nonexistent C:\ path.
_TESSERACT_WINDOWS = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
if os.path.exists(_TESSERACT_WINDOWS):
    pytesseract.pytesseract.tesseract_cmd = _TESSERACT_WINDOWS

_POPPLER_WINDOWS = r"C:\poppler-23.11.0\Library\bin"
# pdf2image treats poppler_path=None as "use the binaries found on PATH".
poppler_path = _POPPLER_WINDOWS if os.path.isdir(_POPPLER_WINDOWS) else None

count = 0
# Sample input used during development — presumably not reachable in
# production; TODO confirm before relying on it.
PDF_GET = r"E:\Pratham\2025\Harsh Sir\Scratch Vision\images\scratch_crab.pdf"

# Output directory layout for scan results.
OUTPUT_FOLDER = "OUTPUTS"
DETECTED_IMAGE_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "DETECTED_IMAGE")
IMAGE_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "SCANNED_IMAGE")
JSON_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "EXTRACTED_JSON")


for path in [OUTPUT_FOLDER, IMAGE_FOLDER_PATH, DETECTED_IMAGE_FOLDER_PATH, JSON_FOLDER_PATH]:
    os.makedirs(path, exist_ok=True)
|
69 |
+
|
70 |
+
# # Model Initialization
|
71 |
+
# try:
|
72 |
+
# smolvlm256m_processor = AutoProcessor.from_pretrained(
|
73 |
+
# "HuggingFaceTB/SmolVLM-256M-Instruct")
|
74 |
+
# # smolvlm256m_model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM-256M-Instruct").to("cpu")
|
75 |
+
# smolvlm256m_model = AutoModelForVision2Seq.from_pretrained(
|
76 |
+
# "HuggingFaceTB/SmolVLM-256M-Instruct",
|
77 |
+
# torch_dtype=torch.bfloat16 if hasattr(
|
78 |
+
# torch, "bfloat16") else torch.float32,
|
79 |
+
# _attn_implementation="eager"
|
80 |
+
# ).to("cpu")
|
81 |
+
# except Exception as e:
|
82 |
+
# raise RuntimeError(f"❌ Failed to load SmolVLM model: {str(e)}")
|
83 |
+
|
84 |
+
# SmolVLM Image Captioning functioning
|
85 |
+
# def get_smolvlm_caption(image: Image.Image, prompt: str = "") -> str:
|
86 |
+
# try:
|
87 |
+
# # Ensure exactly one <image> token
|
88 |
+
# if "<image>" not in prompt:
|
89 |
+
# prompt = f"<image> {prompt.strip()}"
|
90 |
+
|
91 |
+
# num_image_tokens = prompt.count("<image>")
|
92 |
+
# if num_image_tokens != 1:
|
93 |
+
# raise ValueError(
|
94 |
+
# f"Prompt must contain exactly 1 <image> token. Found {num_image_tokens}")
|
95 |
+
|
96 |
+
# inputs = smolvlm256m_processor(
|
97 |
+
# images=[image], text=[prompt], return_tensors="pt").to("cpu")
|
98 |
+
# output_ids = smolvlm256m_model.generate(**inputs, max_new_tokens=100)
|
99 |
+
# return smolvlm256m_processor.decode(output_ids[0], skip_special_tokens=True)
|
100 |
+
# except Exception as e:
|
101 |
+
# return f"❌ Error during caption generation: {str(e)}"
|
102 |
+
|
103 |
+
def classify_image_type(description_or_name: str) -> str:
    """Bucket a caption or filename into one of the asset categories.

    Returns "code-block", "sprite", "backdrop", or "unknown". Matching is a
    case-insensitive substring scan; code-block keywords deliberately take
    precedence over sprite and backdrop keywords.
    """
    text = description_or_name.lower()

    # Ordered (label, keywords) table — first group with a hit wins.
    keyword_groups = (
        ("code-block", (
            "move", "turn", "wait", "repeat", "if", "else", "broadcast",
            "glide", "change", "forever", "when", "switch", "costume",
            "say", "think", "stop", "clone", "touching", "sensing",
            "scratch", "block", "code", "set", "variable",
        )),
        ("sprite", (
            "sprite", "character", "animal", "person", "creature", "robot", "figure",
        )),
        ("backdrop", (
            "background", "scene", "forest", "city", "room", "sky", "mountain", "village",
        )),
    )

    for label, keywords in keyword_groups:
        if any(keyword in text for keyword in keywords):
            return label
    return "unknown"
|
123 |
+
|
124 |
+
class GameState(TypedDict):
    """State dict passed between LangGraph nodes.

    Keys:
        image: base64-encoded PNG of the scanned Scratch script.
        pseudo_node: output of the pseudo-code extraction node, if any.
    """
    image: str
    pseudo_node: Optional[Dict]
|
130 |
+
|
131 |
+
# Refined SYSTEM_PROMPT with more explicit Scratch JSON rules, especially for variables.
# Used by the main `agent` (create_react_agent below) for all Scratch-VM 3.x
# project-JSON generation/modification requests.
# NOTE(review): the [cite_start] / [cite: ...] markers inside the prompt look
# like citation artifacts carried over from a source document — confirm they
# are intentional before sending them to the model.
SYSTEM_PROMPT = """
You are an expert AI assistant named GameScratchAgent, specialized in generating and modifying Scratch-VM 3.x game project JSON.
Your core task is to process game descriptions and existing Scratch JSON structures, then produce or update JSON segments accurately.
You possess deep knowledge of Scratch 3.0 project schema, informed by comprehensive reference materials. When generating or modifying the `blocks` section, pay extremely close attention to the following:

**Scratch Project JSON Schema Rules:**

1.  **Target Structure (`project.json`'s `targets` array):**
    * Each object in the `targets` array represents a Stage or a Sprite.
    * `isStage`: A boolean indicating if the target is the Stage (`true`) or a Sprite (`false`).
    * `name`: The name of the Stage (e.g., `"Stage"`) or the Sprite (e.g., `"Cat"`). This property replaces `objName` found in older Scratch versions.
    * `variables` dictionary: This dictionary maps unique variable IDs to arrays `[variable_name, initial_value, isCloudVariable?]`.
        * `variable_name`: The user-defined name of the variable.
        * `initial_value`: The variable's initial value, which can be a number or a string.
        * `isCloudVariable?`: (Optional) A boolean indicating if it's a cloud variable (`true`) or a local variable (`false` or absent for regular variables).
        * Example: `"myVarId123": ["score", 0]`, `"cloudVarId456": ["☁ High Score", "54", true]`
    * `lists` dictionary: This dictionary maps unique list IDs to arrays `[list_name, [item1, item2, ...]]`.
        * Example: `"myListId789": ["my list", ["apple", "banana"]]`
    * `broadcasts` dictionary: This dictionary maps unique broadcast IDs to their names.
        * Example: `"myBroadcastId": "Game Over"`
    * `blocks` dictionary: This dictionary contains all the blocks belonging to this target. Keys are block IDs, values are block objects.

2.  **Block Structure (within a `target`'s `blocks` dictionary):**
    * Every block object must have the following core properties:
        * [cite_start]`opcode`: A unique internal identifier for the block's specific functionality (e.g., `"motion_movesteps"`, `"event_whenflagclicked"`)[cite: 31, 18, 439, 452].
        * `parent`: The ID of the block directly above it in the script stack (or `null` for a top-level block).
        * `next`: The ID of the block directly below it in the script stack (or `null` for the end of a stack).
        * `inputs`: An object defining values or blocks plugged into the block's input slots. Values are **arrays**.
        * `fields`: An object defining dropdown menu selections or direct internal values within the block. Values are **arrays**.
        * `shadow`: `true` if it's a shadow block (e.g., a default number input that can be replaced by another block), `false` otherwise.
        * `topLevel`: `true` if it's a hat block or a standalone block (not connected to a parent), `false` otherwise.

3.  **`inputs` Property Details (for blocks plugged into input slots):**
    * **Direct Block Connection (Reporter/Boolean block plugged in):**
        * Format: `"<INPUT_NAME>": [1, "<blockId_of_plugged_block>"]`
        * Example: `"CONDITION": [1, "someBooleanBlockId"]` (e.g., for an `if` block).
    * **Literal Value Input (Shadow block with a literal):**
        * Format: `"<INPUT_NAME>": [1, [<type_code>, "<value_string>"]]`
        * `type_code`: A numeric code representing the data type. Common codes include: `4` for number, `7` for string/text, `10` for string/message.
        * `value_string`: The literal value as a string.
        * Examples:
            * Number: `"STEPS": [1, [4, "10"]]` (for `move 10 steps` block).
            * String/Text: `"MESSAGE": [1, [7, "Hello"]]` (for `say Hello` block).
            * String/Message (common for text inputs): `"MESSAGE": [1, [10, "Hello!"]]` (for `say Hello! for 2 secs`).
    * **C-Block Substack (blocks within a loop or conditional):**
        * Format: `"<SUBSTACK_NAME>": [2, "<blockId_of_first_block_in_substack>"]`
        * Common `SUBSTACK_NAME` values are `SUBSTACK` (for `if`, `forever`, `repeat`) and `SUBSTACK2` (for `else` in `if else`).
        * Example: `"SUBSTACK": [2, "firstBlockInLoopId"]`

4.  **`fields` Property Details (for dropdowns or direct internal values):**
    * Used for dropdown menus, variable names, list names, or other static selections directly within the block.
    * Format: `"<FIELD_NAME>": ["<selected_value>", null]`
    * Examples:
        * Dropdown: `"KEY_OPTION": ["space", null]` (for `when space key pressed`).
        * Variable Name: `"VARIABLE": ["score", null]` (for `set score to 0`).
        * Direction (specific motion block): `"FORWARD_BACKWARD": ["forward", null]` (for `go forward layers`).

5.  **Unique IDs:**
    * All block IDs, variable IDs, and list IDs must be unique strings (e.g., "myBlock123", "myVarId456", "myListId789"). Do NOT use placeholder strings like "block_id_here".

6.  **No Nested `blocks` Dictionary:**
    * The `blocks` dictionary should only appear once per `target` (sprite/stage). Do NOT nest a `blocks` dictionary inside an individual block definition. Blocks that are part of a substack are linked via the `SUBSTACK` input.

7.  **Asset Properties (for Costumes/Sounds):**
    * `assetId`, `md5ext`, `bitmapResolution`, `rotationCenterX`/`rotationCenterY` should be correctly associated with costume and sound objects within the `costumes` and `sounds` arrays.

**General Principles and Important Considerations:**
* **Backward Compatibility:** Adhere strictly to existing Scratch 3.0 opcodes and schema to ensure backward compatibility with older projects. [cite_start]Opcodes must remain consistent to prevent previously saved projects from failing to load or behaving unexpectedly[cite: 18, 19, 25, 65].
* **Forgiving Inputs:** Recognize that Scratch is designed to be "forgiving in its interpretation of inputs." [cite_start]The Scratch VM handles potentially "invalid" inputs gracefully (e.g., converting a number to a string if expected, returning default values like zero or empty strings, or performing no action) rather than crashing[cite: 20, 21, 22, 38, 39, 41]. This implies that precise type matching for inputs might be handled internally by Scratch, allowing for some flexibility in how values are provided, but the agent should aim for the most common and logical type.
"""
|
202 |
+
|
203 |
+
# System prompt for the JSON-repair agent: it receives a malformed JSON string
# (typically the main agent's output that failed json.loads) and must answer
# with corrected, schema-conformant JSON only.
SYSTEM_PROMPT_JSON_CORRECTOR ="""
You are an assistant that outputs JSON responses strictly following the given schema.
If the JSON you produce has any formatting errors, missing required fields, or invalid structure, you must identify the problems and correct them.
Always return only valid JSON that fully conforms to the schema below, enclosed in triple backticks (```), without any extra text or explanation.

If you receive an invalid or incomplete JSON response, fix it by:
- Adding any missing required fields with appropriate values.
- Correcting syntax errors such as missing commas, brackets, or quotes.
- Ensuring the JSON structure matches the schema exactly.

Remember: Your output must be valid JSON only, ready to be parsed without errors.
"""
# debugger and resolver agent for Scratch 3.0
# Invoked from plan_logic_aligner_node's JSONDecodeError fallback path.
agent_json_resolver = create_react_agent(
    model=llm,
    tools=[],  # No specific tools are defined here, but could be added later
    prompt=SYSTEM_PROMPT_JSON_CORRECTOR
)
|
221 |
+
|
222 |
+
# Helper function to load the block catalog from a JSON file
|
223 |
+
def _load_block_catalog(file_path: str) -> Dict:
|
224 |
+
"""Loads the Scratch block catalog from a specified JSON file."""
|
225 |
+
try:
|
226 |
+
with open(file_path, 'r') as f:
|
227 |
+
catalog = json.load(f)
|
228 |
+
logger.info(f"Successfully loaded block catalog from {file_path}")
|
229 |
+
return catalog
|
230 |
+
except FileNotFoundError:
|
231 |
+
logger.error(f"Error: Block catalog file not found at {file_path}")
|
232 |
+
# Return an empty dict or raise an error, depending on desired behavior
|
233 |
+
return {}
|
234 |
+
except json.JSONDecodeError as e:
|
235 |
+
logger.error(f"Error decoding JSON from {file_path}: {e}")
|
236 |
+
return {}
|
237 |
+
except Exception as e:
|
238 |
+
logger.error(f"An unexpected error occurred while loading {file_path}: {e}")
|
239 |
+
return {}
|
240 |
+
|
241 |
+
# --- Global variable for the block catalog ---
ALL_SCRATCH_BLOCKS_CATALOG = {}

# Catalog file locations. Built with os.path.join so they resolve on any OS
# (the previous raw "blocks\..." literals only worked on Windows).
_BLOCKS_DIR = "blocks"
BLOCK_CATALOG_PATH = os.path.join(_BLOCKS_DIR, "blocks.json")
HAT_BLOCKS_PATH = os.path.join(_BLOCKS_DIR, "hat_blocks.json")
STACK_BLOCKS_PATH = os.path.join(_BLOCKS_DIR, "stack_blocks.json")
REPORTER_BLOCKS_PATH = os.path.join(_BLOCKS_DIR, "reporter_blocks.json")
BOOLEAN_BLOCKS_PATH = os.path.join(_BLOCKS_DIR, "boolean_blocks.json")
C_BLOCKS_PATH = os.path.join(_BLOCKS_DIR, "c_blocks.json")
CAP_BLOCKS_PATH = os.path.join(_BLOCKS_DIR, "cap_blocks.json")


def _summarize_blocks(block_data: Dict) -> str:
    """Render a loaded catalog's blocks as ' - Opcode: ..., functionality: ...'
    lines suitable for interpolation into the LLM prompts."""
    lines = []
    for block in block_data.get("blocks", []):
        line = f" - Opcode: {block.get('op_code')}, functionality: {block.get('functionality')}"
        # Some catalogs (e.g. stack blocks) also carry a standalone usage example.
        if block.get("example_standalone"):
            line += f" e.g. {block['example_standalone']}"
        lines.append(line)
    return "\n".join(lines)


# BUG FIX: these summaries used to be built with
#   os.path.join(<catalog>.json, "<name>.txt")
# which produced a nonsense *path string* (e.g. "blocks/hat_blocks.json/hat_blocks.txt")
# that was then interpolated into the prompts instead of the block reference
# text. Rebuild the intended text from the loaded catalogs, and use .get() so
# a missing/broken catalog file degrades to empty text instead of a KeyError.
hat_block_data = _load_block_catalog(HAT_BLOCKS_PATH)
hat_description = hat_block_data.get("description", "")
hat_opcodes_functionalities = _summarize_blocks(hat_block_data)
print(f"\nhat_opcodes_functionalities:\n {hat_opcodes_functionalities}\n")

boolean_block_data = _load_block_catalog(BOOLEAN_BLOCKS_PATH)
boolean_description = boolean_block_data.get("description", "")
boolean_opcodes_functionalities = _summarize_blocks(boolean_block_data)
print(f"\n\n\nboolean_opcodes_functionalities:\n {boolean_opcodes_functionalities}")

c_block_data = _load_block_catalog(C_BLOCKS_PATH)
c_description = c_block_data.get("description", "")
c_opcodes_functionalities = _summarize_blocks(c_block_data)
print(f"c_opcodes_functionalities:\n\n{c_opcodes_functionalities}\n")

cap_block_data = _load_block_catalog(CAP_BLOCKS_PATH)
cap_description = cap_block_data.get("description", "")
cap_opcodes_functionalities = _summarize_blocks(cap_block_data)
print(f"cap_opcodes_functionalities:\n\n{cap_opcodes_functionalities}\n")

reporter_block_data = _load_block_catalog(REPORTER_BLOCKS_PATH)
reporter_description = reporter_block_data.get("description", "")
reporter_opcodes_functionalities = _summarize_blocks(reporter_block_data)
print(f"reporter_opcodes_functionalities:\n\n{reporter_opcodes_functionalities}\n")

stack_block_data = _load_block_catalog(STACK_BLOCKS_PATH)
stack_description = stack_block_data.get("description", "")
stack_opcodes_functionalities = _summarize_blocks(stack_block_data)
print(f"stack_opcodes_functionalities:\n\n{stack_opcodes_functionalities}\n")

# This makes ALL_SCRATCH_BLOCKS_CATALOG available globally
# ALL_SCRATCH_BLOCKS_CATALOG = _load_block_catalog(BLOCK_CATALOG_PATH)
|
290 |
+
|
291 |
+
# Helper function to extract JSON from LLM response
def extract_json_from_llm_response(raw_response: str) -> dict:
    """Extract and sanitize a JSON object embedded in an LLM reply.

    Pulls the payload out of a ```json fenced block when present, applies a
    series of regex repairs for the malformations this app's models are known
    to produce, then parses it. Raises json.JSONDecodeError if the string is
    still invalid after sanitization (callers fall back to the JSON-repair
    agent). The repair steps are order-sensitive — do not reorder them.
    """
    # --- 1) Pull out the JSON code‑block if present ---
    md = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", raw_response)
    json_string = md.group(1).strip() if md else raw_response

    # --- 2) Trim to the outermost { … } so we drop any prefix/suffix junk ---
    first, last = json_string.find('{'), json_string.rfind('}')
    if 0 <= first < last:
        json_string = json_string[first:last+1]

    # --- 3) PRE‑CLEANUP: remove stray assistant{…}, rogue assistant keys, fix boolean quotes ---
    # NOTE(review): the first substitution collapses ANY word directly before
    # "{" into "{", not just "assistant" — confirm this never clips a wanted key.
    json_string = re.sub(r'\b\w+\s*{', '{', json_string)
    json_string = re.sub(r'"assistant"\s*:', '', json_string)
    json_string = re.sub(r'\b(false|true)"', r'\1', json_string)
    logger.debug("Ran pre‑cleanup for stray tokens and boolean quotes.")

    # --- 3.1) Fix stray inner quotes at start of name/list values ---
    # e.g., { "name": " \"recent_scoress\"", ... } → "recent_scoress"
    json_string = re.sub(
        r'("name"\s*:\s*")\s*"',
        r'\1',
        json_string
    )

    # --- 4) Escape all embedded quotes in any `logic` value up to the next key ---
    def _esc(m):
        # m.group(1) is the '"logic": "' prefix, m.group(2) the raw value body.
        prefix, body = m.group(1), m.group(2)
        return prefix + body.replace('"', r'\"')
    json_string = re.sub(
        r'("logic"\s*:\s*")([\s\S]+?)(?=",\s*"[A-Za-z_]\w*"\s*:\s*)',
        _esc,
        json_string
    )
    logger.debug("Escaped embedded quotes in logic fields.")

    # NOTE(review): there is no step 5 — the log line below is vestigial; no
    # key-quoting substitution actually runs here.
    logger.debug("Quoted unquoted keys.")

    # --- 6) Remove trailing commas before } or ] ---
    json_string = re.sub(r',\s*(?=[}\],])', '', json_string)
    json_string = re.sub(r',\s*,', ',', json_string)
    logger.debug("Removed trailing commas.")

    # --- 7) Balance braces: drop extra } at end if needed ---
    ob, cb = json_string.count('{'), json_string.count('}')
    if cb > ob:
        excess = cb - ob
        json_string = json_string.rstrip()[:-excess]
        logger.debug(f"Stripped {excess} extra closing brace(s).")

    # --- 8) Escape literal newlines in *all* string values ---
    json_string = re.sub(
        r'"((?:[^"\\]|\\.)*?)"',
        lambda m: '"' + m.group(1).replace('\n', '\\n').replace('\r', '\\r') + '"',
        json_string,
        flags=re.DOTALL
    )
    logger.debug("Escaped newlines in strings.")

    # --- 9) Final parse attempt ---
    try:
        return json.loads(json_string)
    except json.JSONDecodeError:
        logger.error("Sanitized JSON still invalid:\n%s", json_string)
        raise
|
356 |
+
|
357 |
+
# Main agent of the system agent for Scratch 3.0.
# Vision-capable ReAct agent: plan_logic_aligner_node sends it the refinement
# prompt plus a base64-encoded script image and expects pseudo-code JSON back.
agent = create_react_agent(
    model=llm,
    tools=[],  # No specific tools are defined here, but could be added later
    prompt=SYSTEM_PROMPT
)
|
363 |
+
|
364 |
+
# Node 6: Logic updating if any issue here
def plan_logic_aligner_node(state: GameState):
    """LangGraph node: turn a scanned Scratch script image into pseudo-code.

    Sends the base64 image from ``state["image"]`` to the vision agent with a
    detailed formatting prompt. If the agent's reply is not parseable JSON,
    falls back to the JSON-repair agent (``agent_json_resolver``).

    Returns:
        The parsed JSON dict on success, ``{"pseudo_code": ...}`` after a
        successful repair, or ``{"error": ...}`` on failure.
    """
    logger.info("--- Running plan_logic_aligner_node ---")

    image = state.get("image", "")

    refinement_prompt = f"""
    You are an expert in Scratch 3.0 game development, specializing in understanding block relationships (stacked, nested).
    "Analyze the Scratch code-block image and generate Pseudo-Code for what this logic appears to be doing."
    From Image, you also have to detect a value of Key given in Text form "Script for: ". Below is the example
    Example: "Script for: Bear", "Script for:" is a key and "Bear" is value.
    --- Scratch 3.0 Block Reference ---
    ### Hat Blocks
    Description: {hat_description}
    Blocks:
    {hat_opcodes_functionalities}

    ### Boolean Blocks
    Description: {boolean_description}
    Blocks:
    {boolean_opcodes_functionalities}

    ### C Blocks
    Description: {c_description}
    Blocks:
    {c_opcodes_functionalities}

    ### Cap Blocks
    Description: {cap_description}
    Blocks:
    {cap_opcodes_functionalities}

    ### Reporter Blocks
    Description: {reporter_description}
    Blocks:
    {reporter_opcodes_functionalities}

    ### Stack Blocks
    Description: {stack_description}
    Blocks:
    {stack_opcodes_functionalities}
    -----------------------------------

    Your task is to:
    If you don't find any "Code-Blocks" then,
    **Don't generate Pseudo Code, and pass the message "No Code-blocks" find...
    If you find any "Code-Blocks" then,
    1. **Refine the 'logic'**: Make it precise, accurate, and fully aligned with the Game Description. Use Scratch‑consistent verbs and phrasing. **Do NOT** use raw double‑quotes inside the logic string.

    2. **Structural requirements**:
        - **Numeric values** `(e.g., 0, 5, 0.2, -130)` **must** be in parentheses: `(0)`, `(5)`, `(0.2)`, `(-130)`.
        - **AlphaNumeric values** `(e.g., hello, say 5, 4, hi!)` **must** be in parentheses: `(hello)`, `(say 5)`, `(4)`, `(hi!)`.
        - **Variables** must be in the form `[variable v]` (e.g., `[score v]`), even when used inside expressions two example use `set [score v] to (1)` or `show variable ([speed v])`.
        - **Dropdown options** must be in the form `[option v]` (e.g., `[Game Start v]`, `[blue sky v]`). example use `when [space v] key pressed`.
        - **Reporter blocks** used as inputs must be double‑wrapped: `((x position))`, `((y position))`. example use `if <((y position)) = (-130)> then` or `(((x position)) * (1))`.
        - **Boolean blocks** in conditions must be inside `< >`, including nested ones: `<not <condition>>`, `<<cond1> and <cond2>>`,`<<cond1> or <cond2>>`.
        - **Other Boolean blocks** in conditions must be inside `< >`, including nested ones or values or variables: `<(block/value/variable) * (block/value/variable)>`,`<(block/value/variable) < (block/value/variable)>`, and example of another variable`<[apple v] contains [a v]?>`.
        - **Operator expressions** must use explicit Scratch operator blocks, e.g.:
          ```
          (([ballSpeed v]) * (1.1))
          ```
        - **Every hat block script must end** with a final `end` on its own line.

    3. **Pseudo‑code formatting**:
        - Represent each block or nested block on its own line.
        - Indent nested blocks by 4 spaces under their parent (`forever`, `if`, etc.).
        - No comments or explanatory text—just the block sequence.
        - a natural language breakdown of each step taken after the event, formatted as a multi-line string representing pseudo-code. Ensure clarity and granularity—each described action should map closely to a Scratch block or tight sequence.

    4. **Logic content**:
        - Build clear flow for mechanics (movement, jumping, flying, scoring, collisions).
        - Match each action closely to a Scratch block or tight sequence.
        - Do **NOT** include any justification or comments—only the raw logic.

    5. **Examples for reference**:
        **Correct** pattern for a simple start script:
        ```
        when green flag clicked
        switch backdrop to [blue sky v]
        set [score v] to (0)
        show variable [score v]
        broadcast [Game Start v]
        end
        ```
        **Correct** pattern for updating the high score variable handling:
        ```
        when I receive [Game Over v]
        if <((score)) > (([High Score v]))> then
            set [High Score v] to ([score v])
        end
        switch backdrop to [Game Over v]
        end
        ```
        **Correct** pattern for level up and increase difficulty use:
        ```
        when I receive [Level Up v]
        change [level v] by (1)
        set [ballSpeed v] to ((([ballSpeed v]) * (1.1)))
        end
        ```
        **Correct** pattern for jumping mechanics use:
        ```
        when [space v] key pressed
        if <((y position)) = (-100)> then
            repeat (5)
                change y by (100)
                wait (0.1) seconds
                change y by (-100)
                wait (0.1) seconds
            end
        end
        end
        ```
        **Correct** pattern for continuos moving objects use:
        ```
        when green flag clicked
        go to x: (240) y: (-100)
        set [speed v] to (-5)
        show variable [speed v]
        forever
            change x by ([speed v])
            if <((x position)) < (-240)> then
                go to x: (240) y: (-100)
            end
        end
        end
        ```
    6. **Donot** add any explaination of logic or comments to justify or explain just put the logic content in the json.
    7. **Output**:
    Return **only** a JSON object, using double quotes everywhere:
    ```json
    {{
        "refined_logic":{{
            "name_variable": 'Value of "Script for: "',
            "pseudocode":"…your fully‑formatted pseudo‑code here…",
        }}
    }}
    ```
    """
    # NOTE: the prompt previously contained the same "continuos moving objects"
    # example twice (accidental duplication — removed) and spelled the output
    # key example "Sript for:" (fixed to match the "Script for:" key the model
    # is asked to extract above).

    image_input = {
        "type": "image_url",
        "image_url": {
            "url": f"data:image/png;base64,{image}"
        }
    }

    content = [
        {"type": "text", "text": refinement_prompt},
        image_input
    ]

    # Pre-bind so the JSONDecodeError handler can safely embed the raw reply
    # even if the failure happened before/while reading the response.
    llm_output_raw = ""
    try:
        # Invoke the main agent for logic refinement and relationship identification
        response = agent.invoke({"messages": [{"role": "user", "content": content}]})
        llm_output_raw = response["messages"][-1].content.strip()

        parsed_llm_output = extract_json_from_llm_response(llm_output_raw)

        result = parsed_llm_output

        print(f"result:\n\n {result}")
        return result
    except json.JSONDecodeError as error_json:
        # BUG FIX: this handler used to be listed *after* `except Exception`.
        # Since JSONDecodeError is a subclass of Exception, it could never
        # fire and the repair path below was unreachable. It must come first.
        correction_prompt = (
            "Your task is to correct the provided JSON string to ensure it is **syntactically perfect and adheres strictly to JSON rules**.\n"
            "It must be a JSON object with `refined_logic` (string) and `block_relationships` (array of objects).\n"
            f"- **Error Details**: {error_json}\n\n"
            "**Strict Instructions for your response:**\n"
            "1. **ONLY** output the corrected JSON. Do not include any other text or explanations.\n"
            "2. Ensure all keys and string values are enclosed in **double quotes**. Escape internal quotes (`\\`).\n"
            "3. No trailing commas. Correct nesting.\n\n"
            "Here is the problematic JSON string to correct:\n"
            f"```json\n{llm_output_raw}\n```\n"
            "Corrected JSON:\n"
        )
        try:
            correction_response = agent_json_resolver.invoke({"messages": [{"role": "user", "content": correction_prompt}]})
            corrected_output = extract_json_from_llm_response(correction_response["messages"][-1].content)
            result = {
                "pseudo_code": corrected_output
            }
            return result
        except Exception as e_corr:
            logger.error(f"Failed to correct JSON output for even after retry: {e_corr}")
            # Previously fell through and returned None implicitly; return an
            # explicit error dict consistent with the handler below.
            return {"error": str(e_corr)}
    except Exception as e:
        logger.error(f"❌ plan_logic_aligner_node failed: {str(e)}")
        return {"error": str(e)}
|
570 |
+
|
571 |
+
'''
|
572 |
+
def get_desc_pseudo(image_path: str, index: int = 1) -> dict:
|
573 |
+
"""
|
574 |
+
Takes a path to a code-block image and returns a dict with:
|
575 |
+
- 'pseudo_code': pseudo-code representing logic in Scratch block format
|
576 |
+
If output_pseudo_path is provided, saves the output to a JSON file with given structure.
|
577 |
+
"""
|
578 |
+
try:
|
579 |
+
image = Image.open(image_path).convert("RGB")
|
580 |
+
|
581 |
+
# Load image and encode to base64
|
582 |
+
with open(image_path, "rb") as image_file:
|
583 |
+
image_bytes = image_file.read()
|
584 |
+
img_base64 = base64.b64encode(image_bytes).decode("utf-8")
|
585 |
+
|
586 |
+
prompt_desc = "Analyze this Scratch code-block image and generate a short caption of what this logic appears to be doing."
|
587 |
+
prompt_pseudo = """
|
588 |
+
Convert this Scratch code-block image into clear pseudo-code in Scratch format.
|
589 |
+
Use Scratch-style syntax (e.g., 'when green flag clicked', 'repeat', 'move (10)',
|
590 |
+
'if <condition> then', etc.) in a multi-line string."""
|
591 |
+
|
592 |
+
system_prompt = """
|
593 |
+
You are an expert in Scratch 3.0 logic reconstruction. You will receive an image of a code block.
|
594 |
+
1. First describe briefly what this script is doing.
|
595 |
+
2. Then generate Scratch-like pseudo-code line-by-line, matching blocks in the image.
|
596 |
+
Return JSON with 'pseudo_code'.
|
597 |
+
"""
|
598 |
+
|
599 |
+
# Build LangChain agent
|
600 |
+
agent = create_react_agent(
|
601 |
+
model=llm,
|
602 |
+
tools=[],
|
603 |
+
prompt=system_prompt
|
604 |
+
)
|
605 |
+
|
606 |
+
content = [
|
607 |
+
{"type": "text", "text": prompt_desc},
|
608 |
+
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_base64}"}}
|
609 |
+
]
|
610 |
+
desc_response = agent.invoke({"messages": [{"role": "user", "content": content}]})
|
611 |
+
caption = desc_response["messages"][-1].content.strip()
|
612 |
+
|
613 |
+
content_pseudo = [
|
614 |
+
{"type": "text", "text": prompt_pseudo},
|
615 |
+
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_base64}"}}
|
616 |
+
]
|
617 |
+
pseudo_response = agent.invoke({"messages": [{"role": "user", "content": content_pseudo}]})
|
618 |
+
pseudo_code = pseudo_response["messages"][-1].content.strip()
|
619 |
+
|
620 |
+
result = {
|
621 |
+
"image_path": image_path,
|
622 |
+
"caption": caption,
|
623 |
+
"pseudo_code": pseudo_code
|
624 |
+
}
|
625 |
+
# --- Fixed output path ---
|
626 |
+
output_json_path = os.path.join("outputs", "pseudo_output.json")
|
627 |
+
os.makedirs(os.path.dirname(output_json_path), exist_ok=True)
|
628 |
+
|
629 |
+
if os.path.exists(output_json_path):
|
630 |
+
with open(output_json_path, "r") as f:
|
631 |
+
existing = json.load(f)
|
632 |
+
else:
|
633 |
+
existing = {"plan": []}
|
634 |
+
|
635 |
+
pseudo_key = f"pseudo{index}"
|
636 |
+
existing["plan"].append({pseudo_key: result})
|
637 |
+
|
638 |
+
with open(output_json_path, "w") as f:
|
639 |
+
json.dump(existing, f, indent=2)
|
640 |
+
|
641 |
+
logger.info(f"✅ Saved pseudo-code to: {output_json_path}")
|
642 |
+
return result
|
643 |
+
|
644 |
+
except Exception as e:
|
645 |
+
logger.error(f"❌ get_desc_pseudo failed for {image_path}: {e}")
|
646 |
+
return {
|
647 |
+
"image_path": image_path,
|
648 |
+
"error": str(e)
|
649 |
+
}'''
|
650 |
+
|
651 |
+
# Vocabulary of Scratch block/editor terms. An image whose generated name
# contains any of these words (case-insensitive substring match) is treated
# as a picture of Scratch code blocks rather than a sprite or backdrop.
scratch_keywords = [
    # motion / control / event blocks
    "move", "turn", "wait", "repeat", "if", "else", "broadcast",
    "glide", "change", "forever", "when", "switch",
    # looks and sound blocks
    "next costume", "set", "show", "hide", "play sound",
    # position / speech blocks
    "go to", "x position", "y position", "think", "say",
    # data / control blocks
    "variable", "stop", "clone",
    # sensing / pen blocks plus generic editor terms
    "touching", "sensing", "pen", "clear", "Scratch", "Code", "scratch blocks",
]
|
659 |
+
|
660 |
+
# --- FUNCTION: Extract images from saved PDF ---
def extract_images_from_pdf(pdf_path, final_json_path_2):
    """Extract embedded images from a PDF and build structured sprite JSON.

    Pipeline:
      1. Run unstructured's ``partition_pdf`` (hi_res strategy) to pull out
         image elements with inline base64 payloads.
      2. Caption each image with the LLM agent to obtain a name/description
         (code-block imagery is expected to be named 'scratch-block').
      3. Save all sprites to ``extracted_sprites.json`` and a filtered copy
         (code-block-named entries removed) to ``extracted_sprites_2.json``.

    Args:
        pdf_path: Path of the uploaded PDF to process.
        final_json_path_2: NOTE(review): this argument is immediately
            shadowed by a locally computed path below, so the caller's
            value is never used — confirm whether it can be dropped.

    Returns:
        Tuple of (path to extracted_sprites.json, unfiltered sprite dict).

    Raises:
        RuntimeError: on any extraction / read / write failure.
    """
    try:
        pdf_filename = os.path.splitext(os.path.basename(pdf_path))[0]  # e.g., "scratch_crab"
        # NOTE(review): backslash replacement assumes Windows-style paths — confirm for deployment.
        pdf_dir_path = os.path.dirname(pdf_path).replace("/", "\\")

        # Create per-PDF subfolders for extracted images and JSON artifacts
        extracted_image_subdir = os.path.join(DETECTED_IMAGE_FOLDER_PATH, pdf_filename)
        json_subdir = os.path.join(JSON_FOLDER_PATH, pdf_filename)
        os.makedirs(extracted_image_subdir, exist_ok=True)
        os.makedirs(json_subdir, exist_ok=True)

        # Output paths
        output_json_path = os.path.join(json_subdir, "extracted.json")
        final_json_path = os.path.join(json_subdir, "extracted_sprites.json")
        # Shadows the function parameter of the same name (see docstring note).
        final_json_path_2 = os.path.join(json_subdir, "extracted_sprites_2.json")

        try:
            # hi_res strategy + payload extraction yields base64 images inline
            elements = partition_pdf(
                filename=pdf_path,
                strategy="hi_res",
                extract_image_block_types=["Image"],
                extract_image_block_to_payload=True,  # Set to True to get base64 in output
            )
        except Exception as e:
            raise RuntimeError(
                f"❌ Failed to extract images from PDF: {str(e)}")

        try:
            # Persist the raw partition output so it can be inspected / reloaded
            with open(output_json_path, "w") as f:
                json.dump([element.to_dict()
                           for element in elements], f, indent=4)
        except Exception as e:
            raise RuntimeError(f"❌ Failed to write extracted.json: {str(e)}")

        try:
            # Re-read the dump so downstream code works on plain dicts
            with open(output_json_path, 'r') as file:
                file_elements = json.load(file)
        except Exception as e:
            raise RuntimeError(f"❌ Failed to read extracted.json: {str(e)}")

        # Prepare manipulated sprite JSON structure
        manipulated_json = {}

        # SET A SYSTEM PROMPT
        system_prompt = """
        You are an expert in visual scene understanding.
        Your Job is to analyze an image and respond acoording if asked for name give simple name by analyzing it and if ask for descrption generate a short description covering its elements.

        Guidelines:
        - Focus only the images given in Square Shape.
        - Don't Consider Blank areas in Image as.
        - Don't include generic summary or explanation outside the fields.
        Return only string.
        """
        # Zero-tool ReAct agent: used purely as a vision-capable chat model
        agent = create_react_agent(
            model=llm,
            tools=[],
            prompt=system_prompt
        )

        # If JSON already exists, load it and find the next available Sprite number
        if os.path.exists(final_json_path):
            with open(final_json_path, "r") as existing_file:
                manipulated = json.load(existing_file)
            # Determine the next available index (e.g., Sprite 4 if 1–3 already exist)
            existing_keys = [int(k.replace("Sprite ", ""))
                             for k in manipulated.keys()]
            start_count = max(existing_keys, default=0) + 1
        else:
            start_count = 1

        sprite_count = start_count
        for i, element in enumerate(file_elements):
            # Only elements carrying an inline base64 payload are candidate sprites
            if "image_base64" in element["metadata"]:
                try:
                    image_data = base64.b64decode(
                        element["metadata"]["image_base64"])
                    image = Image.open(io.BytesIO(image_data)).convert("RGB")

                    # Upscale before captioning to give the model more detail
                    image = upscale_image(image, scale=2)
                    # image.show(title=f"Extracted Image {i+1}")
                    image_path = os.path.join(extracted_image_subdir, f"Sprite_{i+1}.png")
                    image.save(image_path)  # don't need to store image in local folder, process it from variable

                    # Re-read the saved file to obtain the base64 sent to the agent
                    with open(image_path, "rb") as image_file:
                        image_bytes = image_file.read()
                        img_base64 = base64.b64encode(image_bytes).decode("utf-8")

                    # (Legacy SmolVLM captioning and the separate name/description
                    # prompt round-trips were removed here — superseded by the
                    # combined single-call prompt below.)

                    # Combined Prompt for Name + Discription
                    prompt_combined = """
                    Analyze this image and return JSON with keys:# modify prompt for "name", if it detects "code-blocks only then give name as 'scratch-block'"
                    {
                    "name": "<short name or 'scratch blocks'>" ,
                    "description": "<short description>"
                    }
                    Guidelines:
                    - If image contains logical/code blocks from Scratch (e.g., move, turn, repeat, when clicked, etc.), use 'scratch-block' as the name.
                    - If image is a character, object, or backdrop, give an appropriate descriptive name instead.
                    - Avoid generic names like 'image1' or 'picture'.
                    - Keep the response strictly in JSON format.
                    """

                    content = [
                        {"type": "text", "text": prompt_combined},
                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}}
                    ]

                    response = agent.invoke({"messages": [{"role": "user", "content": content}]})
                    # NOTE(review): json.loads assumes the model returns bare JSON;
                    # a code-fenced response would raise here and skip this sprite
                    # (caught by the per-element except below).
                    result_json = json.loads(response["messages"][-1].content)
                    try:
                        name = result_json.get("name", "").strip()
                        description = result_json.get("description", "").strip()
                    except Exception as e:
                        logger.error(f"⚠️ Failed to extract name/description: {str(e)}")
                        name = "unknown"
                        description = "unknown"

                    # (Legacy keyword-based sprite/backdrop and code-block
                    # pre-filters were removed here — filtering now happens
                    # once after the loop, via is_code_block below.)

                    manipulated_json[f"Sprite {sprite_count}"] = {
                        "name": name,
                        "base64": element["metadata"]["image_base64"],
                        "file-path": pdf_dir_path,
                        "description": description
                    }
                    sprite_count += 1
                except Exception as e:
                    # Best-effort: a bad element is logged and skipped, not fatal
                    print(f"⚠️ Error processing Sprite {i+1}: {str(e)}")

        # Save manipulated JSON (unfiltered: may still contain code-block images)
        with open(final_json_path, "w") as sprite_file:
            json.dump(manipulated_json, sprite_file, indent=4)

        def is_code_block(name: str) -> bool:
            # True when the generated name contains any Scratch vocabulary word
            for kw in scratch_keywords:
                if kw.lower() in name.lower():
                    return True
            return False

        # Filter out code block images
        filtered_sprites = {}
        for key, value in manipulated_json.items():
            sprite_name = value.get("name", "")
            if not is_code_block(sprite_name):
                filtered_sprites[key] = value
            else:
                logger.info(f"🛑 Excluded code block-like image: {key}")

        # Overwrite with filtered content
        with open(final_json_path_2, "w") as sprite_file:
            json.dump(filtered_sprites, sprite_file, indent=4)
        # print(f"✅ Manipulated sprite JSON saved: {final_json_path}")

        # NOTE(review): returns the *unfiltered* dict alongside the unfiltered
        # JSON path — confirm callers do not expect the filtered variant.
        return final_json_path, manipulated_json
    except Exception as e:
        raise RuntimeError(f"❌ Error in extract_images_from_pdf: {str(e)}")
|
911 |
+
|
912 |
+
def similarity_matching(input_json_path: str, project_folder: str) -> str:
    """Match extracted sprites to reference assets via CLIP and assemble project.json.

    Embeds each extracted sprite image with CLIP, compares it (dot product)
    against precomputed reference-image embeddings, then copies each
    best-matching sprite/backdrop asset folder into *project_folder* and
    merges the matched sprite.json files / stage targets into a single
    Scratch 3.0 project.json.

    Args:
        input_json_path: Path to the extracted-sprites JSON
            (mapping "Sprite N" -> {"name", "description", "base64", ...}).
        project_folder: Destination folder for copied assets and project.json.

    Returns:
        Path of the written project.json.
    """

    logger.info("🔍 Running similarity matching...")

    # ============================== #
    #         DEFINE PATHS           #
    # ============================== #
    # NOTE(review): hard-coded local Windows paths; the env-var based
    # versions are commented out — this will not run outside the dev machine.
    backdrop_images_path = r"E:\Pratham\2025\Harsh Sir\Scratch Vision\images\Backdrops"
    sprite_images_path = r"E:\Pratham\2025\Harsh Sir\Scratch Vision\images\sprites"
    # backdrop_images_path = os.getenv("BACKDROP_FOLDER_PATH", "/app/reference/backdrops")
    # sprite_images_path = os.getenv("SPRITE_FOLDER_PATH", "/app/reference/sprites")
    image_dirs = [backdrop_images_path, sprite_images_path]

    project_json_path = os.path.join(project_folder, "project.json")

    # ============================== #
    #     READ SPRITE METADATA       #
    # ============================== #
    with open(input_json_path, 'r') as f:
        sprites_data = json.load(f)

    # Parallel lists: ids, text descriptions, and base64 image payloads
    sprite_ids, texts, sprite_base64 = [], [], []
    for sid, sprite in sprites_data.items():
        sprite_ids.append(sid)
        texts.append(
            "This is " + sprite.get("description", sprite.get("name", "")))
        sprite_base64.append(sprite["base64"])

    # ========================================= #
    #  Walk folders to collect all image paths  #
    # ========================================= #
    # NOTE(review): hard-coded snapshot of the reference library; its order
    # must stay in sync with the precomputed embeddings.json loaded below
    # (the original directory-walk code was commented out).
    folder_image_paths = ['E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\badroom3.sb3\\8cc0b88d53345b3e337e8f028a32a4e7.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\baseball2.sb3\\7be1f5b3e682813dac1f297e52ff7dca.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\beach_malibu.sb3\\050615fe992a00d6af0e664e497ebf53.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\castle2.sb3\\951765ee7f7370f120c9df20b577c22f.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\hall.sb3\\ea86ca30b346f27ca5faf1254f6a31e3.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\Backdrops\\jungle.sb3\\f4f908da19e2753f3ed679d7b37650ca.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Batter.sprite3\\baseball_sprite_motion_1.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Bear.sprite3\\bear_motion_2.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Beetle.sprite3\\46d0dfd4ae7e9bfe3a6a2e35a4905eae.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\cat\\cat_motion_1.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Centaur.sprite3\\2373556e776cad3ba4d6ee04fc34550b.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Crab.sprite3\\bear_element.png', 'E:\\Pratham\\2025\\Harsh Sir\\Scratch Vision\\images\\sprites\\Soccer Ball.sprite3\\cat_football.png']
    # (dead code, disabled via string literal: one-time generation of
    # embeddings.json from the reference images)
    """
    # # ============================== #
    # # EMBED FOLDER IMAGES (REF) #
    # # ============================== #
    img_features = clip_embd.embed_image(folder_image_paths)

    # # ============================== #
    # # Store image embeddings #
    # # ============================== #
    embedding_json = []
    for i, path in enumerate(folder_image_paths):
        embedding_json.append({
            "name":os.path.basename(path),
            "file-path": path,
            "embeddings": list(img_features[i])
        })

    # # Save to embeddings.json
    with open(f"{OUTPUT_FOLDER}/embeddings.json", "w") as f:
        json.dump(embedding_json, f, indent=2)"""

    # ============================== #
    #     DECODE SPRITE IMAGES       #
    # ============================== #
    # Decode each sprite's base64 into a temp PNG so CLIP can embed from disk
    temp_dir = tempfile.mkdtemp()
    sprite_image_paths = []
    for idx, b64 in enumerate(sprite_base64):
        image_data = base64.b64decode(b64.split(",")[-1])
        img = Image.open(BytesIO(image_data)).convert("RGB")
        temp_path = os.path.join(temp_dir, f"sprite_{idx}.png")
        img.save(temp_path)
        sprite_image_paths.append(temp_path)
    print(f"\n\n\nSPRITE IMAGE PATHS: \n{sprite_image_paths}")

    # ============================== #
    #     EMBED SPRITE IMAGES        #
    # ============================== #
    sprite_features = clip_embd.embed_image(sprite_image_paths)

    # ============================== #
    #     COMPUTE SIMILARITIES       #
    # ============================== #
    # Precomputed reference embeddings; row order must match folder_image_paths
    with open(f"{OUTPUT_FOLDER}/embeddings.json", "r") as f:
        embedding_json = json.load(f)

    img_matrix = np.array([img["embeddings"] for img in embedding_json])
    sprite_matrix = np.array(sprite_features)

    # Dot-product similarity; per-sprite argmax picks the best reference image
    similarity = np.matmul(sprite_matrix, img_matrix.T)
    most_similar_indices = np.argmax(similarity, axis=1)
    print(f"")
    # ============= Match and copy ===============
    project_data = []
    copied_folders = set()

    # =============================================================== #
    #   Loop through most similar images from Sprites folder          #
    #   → Copy sprite assets (excluding matched image + sprite.json)  #
    #   → Load sprite.json and append its data to project_data        #
    # =============================================================== #
    for sprite_idx, matched_idx in enumerate(most_similar_indices):
        matched_image_path = folder_image_paths[matched_idx]
        matched_image_path = os.path.normpath(matched_image_path)

        matched_folder = os.path.dirname(matched_image_path)
        folder_name = os.path.basename(matched_folder)

        # Each reference folder is copied at most once, even if several
        # sprites matched into it
        if matched_folder in copied_folders:
            continue
        copied_folders.add(matched_folder)
        logger.info(f"Matched image path: {matched_image_path}")

        sprite_json_path = os.path.join(matched_folder, 'sprite.json')
        if not os.path.exists(sprite_json_path):
            logger.warning(f"sprite.json not found in: {matched_folder}")
            continue

        with open(sprite_json_path, 'r') as f:
            sprite_data = json.load(f)
        # Copy only non-matched files
        for fname in os.listdir(matched_folder):
            fpath = os.path.join(matched_folder, fname)
            if os.path.isfile(fpath) and fname not in {os.path.basename(matched_image_path), 'sprite.json'}:
                shutil.copy2(fpath, os.path.join(project_folder, fname))
        project_data.append(sprite_data)

    # ================================================================== #
    #   Loop through most similar images from Backdrops folder           #
    #   → Copy Backdrop assets (excluding matched image + project.json)  #
    #   → Load project.json and append its data to project_data          #
    # ================================================================== #
    backdrop_data = []  # for backdrop-related entries

    for backdrop_idx, matched_idx in enumerate(most_similar_indices):
        matched_image_path = os.path.normpath(folder_image_paths[matched_idx])

        # Check if the match is from the Backdrops folder
        if matched_image_path.startswith(os.path.normpath(backdrop_images_path)):
            matched_folder = os.path.dirname(matched_image_path)
            folder_name = os.path.basename(matched_folder)

            logger.info(f"Backdrop matched image: {matched_image_path}")

            # Copy only non-matched files
            for fname in os.listdir(matched_folder):
                fpath = os.path.join(matched_folder, fname)
                if os.path.isfile(fpath) and fname not in {os.path.basename(matched_image_path), 'project.json'}:
                    shutil.copy2(fpath, os.path.join(project_folder, fname))

            # Append the backdrop's Stage target(s) from its project.json
            backdrop_json_path = os.path.join(matched_folder, 'project.json')
            if os.path.exists(backdrop_json_path):
                with open(backdrop_json_path, 'r') as f:
                    backdrop_json_data = json.load(f)
                if "targets" in backdrop_json_data:
                    for target in backdrop_json_data["targets"]:
                        if target.get("isStage") == True:
                            backdrop_data.append(target)
            else:
                logger.warning(f"project.json not found in: {matched_folder}")

    # (dead code, disabled via string literal: earlier combined
    # sprite/backdrop matching loop that read embeddings as dict entries)
    '''
    project_data, backdrop_data = [], []
    copied_folders = set()
    for sprite_idx, matched_idx in enumerate(most_similar_indices):
        matched_entry = folder_image_paths[matched_idx]
        # matched_image_path = os.path.normpath(folder_image_paths[matched_idx])
        matched_image_path = os.path.normpath(matched_entry["file-path"])
        matched_folder = os.path.dirname(matched_image_path)
        if matched_folder in copied_folders:
            continue
        copied_folders.add(matched_folder)

        # Sprite
        sprite_json_path = os.path.join(matched_folder, 'sprite.json')
        if os.path.exists(sprite_json_path):
            with open(sprite_json_path, 'r') as f:
                sprite_data = json.load(f)
            project_data.append(sprite_data)

            for fname in os.listdir(matched_folder):
                if fname not in {os.path.basename(matched_image_path), 'sprite.json'}:
                    shutil.copy2(os.path.join(
                        matched_folder, fname), project_folder)

        # Backdrop
        if matched_image_path.startswith(os.path.normpath(backdrop_images_path)):
            backdrop_json_path = os.path.join(matched_folder, 'project.json')
            if os.path.exists(backdrop_json_path):
                with open(backdrop_json_path, 'r') as f:
                    backdrop_json_data = json.load(f)
                for target in backdrop_json_data.get("targets", []):
                    if target.get("isStage"):
                        backdrop_data.append(target)
                for fname in os.listdir(matched_folder):
                    if fname not in {os.path.basename(matched_image_path), 'project.json'}:
                        shutil.copy2(os.path.join(
                            matched_folder, fname), project_folder)'''

    # Merge JSON structure: start from an empty Scratch 3.0 project skeleton
    final_project = {
        "targets": [],
        "monitors": [],
        "extensions": [],
        "meta": {
            "semver": "3.0.0",
            "vm": "11.3.0",
            "agent": "OpenAI ScratchVision Agent"
        }
    }

    # Non-stage sprite targets first
    for sprite in project_data:
        if not sprite.get("isStage", False):
            final_project["targets"].append(sprite)

    # Fold every matched backdrop into a single Stage target; costumes are
    # concatenated, sounds are taken from the first backdrop only
    if backdrop_data:
        all_costumes, sounds = [], []
        for idx, bd in enumerate(backdrop_data):
            all_costumes.extend(bd.get("costumes", []))
            if idx == 0 and "sounds" in bd:
                sounds = bd["sounds"]
        final_project["targets"].append({
            "isStage": True,
            "name": "Stage",
            "variables": {},
            "lists": {},
            "broadcasts": {},
            "blocks": {},
            "comments": {},
            "currentCostume": 1 if len(all_costumes) > 1 else 0,
            "costumes": all_costumes,
            "sounds": sounds,
            "volume": 100,
            "layerOrder": 0,
            "tempo": 60,
            "videoTransparency": 50,
            "videoState": "on",
            "textToSpeechLanguage": None
        })

    with open(project_json_path, 'w') as f:
        json.dump(final_project, f, indent=2)

    # logger.info(f"🎉 Final project saved: {project_json_path}")
    return project_json_path
|
1170 |
+
|
1171 |
+
# --- ASYNC PDF to Image Conversion ---
async def convert_pdf_to_images_async(pdf_path, dpi=300):
    """Convert a PDF into per-page PNG images without blocking the event loop.

    Delegates to the blocking ``convert_pdf_to_images_sync`` inside a thread
    pool so async callers can ``await`` the conversion.

    Args:
        pdf_path: Path to the source PDF file.
        dpi: Render resolution forwarded to pdf2image (default 300).

    Returns:
        List of file paths of the rendered page images, one per page.
    """
    pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
    # NOTE(review): convert_pdf_to_images_sync currently recomputes its own
    # output folder and ignores this value — confirm which location is intended.
    output_image_folder = os.path.join(IMAGE_FOLDER_PATH, pdf_name)
    # get_running_loop() is the correct call inside a coroutine;
    # asyncio.get_event_loop() is deprecated here since Python 3.10.
    loop = asyncio.get_running_loop()
    with ThreadPoolExecutor() as pool:
        # Pass poppler_path explicitly
        result = await loop.run_in_executor(
            pool, convert_pdf_to_images_sync, pdf_path, output_image_folder, dpi, poppler_path
        )
    return result
|
1182 |
+
|
1183 |
+
# Blocking version used internally
def convert_pdf_to_images_sync(pdf_path, output_image_folder, dpi, poppler_path):
    """Render each page of *pdf_path* to a PNG via pdf2image (blocking).

    Args:
        pdf_path: Path to the source PDF.
        output_image_folder: NOTE(review): this parameter is immediately
            overwritten below with outputs/SCANNED_IMAGE/<pdf_name>, so the
            caller's folder is silently ignored — confirm intended location.
        dpi: Render resolution for pdf2image.
        poppler_path: Explicit Poppler binary directory for pdf2image.

    Returns:
        List of saved page image paths ("page_1.png", "page_2.png", ...).

    Raises:
        RuntimeError: if Poppler is missing or its path is wrong.
        Exception: any other pdf2image failure is logged and re-raised.
    """
    pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
    # Overrides the output_image_folder argument (see docstring note).
    output_image_folder = os.path.join("outputs", "SCANNED_IMAGE", pdf_name)
    os.makedirs(output_image_folder, exist_ok=True)

    print(f"[INFO] Converting PDF: {pdf_path}")
    print(f"[INFO] Output folder: {output_image_folder}")
    print(f"[INFO] Using Poppler path: {poppler_path}")
    try:
        images = convert_from_path(pdf_path, dpi=dpi, poppler_path=poppler_path)
        image_paths = []
        for i, img in enumerate(images):
            # Pages are 1-indexed in the output file names
            output_path = os.path.join(output_image_folder, f"page_{i+1}.png")
            img.save(output_path, "PNG")
            print(f"[DEBUG] Saved: {output_path}")
            image_paths.append(output_path)
        return image_paths
    except PDFInfoNotInstalledError as e:
        raise RuntimeError(f"Poppler not installed or path incorrect: {str(e)}")
    except Exception as e:
        print(f"[ERROR] Failed to convert PDF: {e}")
        raise
|
1206 |
+
|
1207 |
+
def delay_for_tpm_node(state: GameState):
    """Graph node that pauses the pipeline, then returns the state unchanged.

    Used as a throttling step between LangGraph nodes (TPM = tokens per
    minute); it performs no computation on *state*.
    """
    pause_seconds = 10  # Adjust the delay as needed
    logger.info("--- Running DelayForTPMNode ---")
    time.sleep(pause_seconds)
    logger.info("Delay completed.")
    return state
|
1212 |
+
|
1213 |
+
# Build the LangGraph workflow: a linear two-stage pipeline that first waits
# out the LLM tokens-per-minute budget, then runs the plan/logic alignment.
workflow = StateGraph(GameState)

# Add all nodes to the workflow
workflow.add_node("time_delay_1", delay_for_tpm_node)
# NOTE(review): the node is named "opcode_counter" but runs
# plan_logic_aligner_node — confirm the name reflects current behavior.
workflow.add_node("opcode_counter", plan_logic_aligner_node)
workflow.set_entry_point("time_delay_1")
workflow.add_edge("time_delay_1","opcode_counter")
workflow.add_edge("opcode_counter", END)
# Compile the graph into an invokable app object
app_graph = workflow.compile()
|
1223 |
+
|
1224 |
+
def get_desc_pseudo(image_path: str, project_folder: str) -> dict:
    """
    Extract Scratch-style pseudo-code from a code-block image.

    Reads the image at *image_path*, base64-encodes it, runs the
    plan_logic_aligner_node over it, and appends the extracted pseudo-code to
    <project_folder>/pseudo_output.json keyed by the sprite/variable name.

    Returns:
        On success: {"name_variable": ..., "pseudo_code": ...}
        On failure: {"image_path": ..., "error": ...} (different shape —
        callers must check for the "error" key).
    """
    try:
        # Load image and encode to base64
        with open(image_path, "rb") as image_file:
            image_bytes = image_file.read()
        img_base64 = base64.b64encode(image_bytes).decode("utf-8")

        # === CALL PLAN LOGIC ALIGNER ===
        logic_refined = plan_logic_aligner_node(state={"image": img_base64})

        # --- Extract fields (defaults guard against missing keys) ---
        refined = logic_refined.get("refined_logic", {})
        name_variable = refined.get("name_variable", "Unknown")
        pseudo_code_raw = refined.get("pseudocode", "No logic extracted")

        # === Save to JSON: accumulate results across calls in one file ===
        output_json_path = os.path.join(project_folder, "pseudo_output.json")
        os.makedirs(os.path.dirname(output_json_path), exist_ok=True)

        # NOTE(review): a corrupt/partial JSON file here raises and trips the
        # outer except — consider guarding json.load with JSONDecodeError.
        if os.path.exists(output_json_path):
            with open(output_json_path, "r") as f:
                existing = json.load(f)
        else:
            existing = {}

        if name_variable not in existing:
            existing[name_variable] = []

        existing[name_variable].append({
            "pseudo_code":pseudo_code_raw
        })

        with open(output_json_path, "w") as f:
            json.dump(existing, f, indent=2)

        result = {
            "name_variable": name_variable,
            "pseudo_code": pseudo_code_raw
        }
        logger.info(f"✅ Saved pseudo-code to: {output_json_path}")
        # NOTE(review): the graph below re-runs plan_logic_aligner_node (as
        # "opcode_counter") on the same image, after a 10 s delay node —
        # duplicate work with the direct call above; confirm which is intended.
        initial_state_dict = {
            # "project_json": project_skeleton,
            # "description": desc,
            # "project_id": project_id,
            "image": img_base64,
            "pseudo_node":{}
        }

        state = app_graph.invoke(initial_state_dict)
        # NOTE(review): final_project_json is computed but never used/returned.
        final_project_json = state['pseudo_node']
        return result
    except Exception as e:
        # Best-effort: report the failure instead of propagating it.
        logger.error(f"❌ get_desc_pseudo failed for {image_path}: {e}")
        return {
            "image_path": image_path,
            "error": str(e)
        }
|
1286 |
+
|
1287 |
+
# ============== Helper function to Upscale an Image ============== #
def upscale_image(image: Image.Image, scale: int = 2) -> Image.Image:
    """Return *image* enlarged by *scale* in both dimensions (LANCZOS).

    On any failure the original image is returned unchanged (best-effort).
    """
    try:
        new_size = tuple(dim * scale for dim in image.size)
        resized = image.resize(new_size, Image.LANCZOS)
        logger.info(f"✅ Upscaled image to {new_size}")
        return resized
    except Exception as e:
        logger.error(f"❌ Error during image upscaling: {str(e)}")
        return image
|
1301 |
+
|
1302 |
+
@app.route('/')
def index():
    # Serve the upload UI page (templates/app_index.html).
    return render_template('app_index.html')
|
1305 |
+
|
1306 |
+
# API endpoint
@app.route('/process_pdf', methods=['POST'])
async def process_pdf():
    """Handle a PDF upload end-to-end.

    Steps: validate the multipart upload, save it to a temp dir, extract
    sprite images + metadata, run similarity matching into a fresh
    outputs/project_<uuid> folder, then (best-effort) render the PDF pages
    to images and extract pseudo-code from each page.

    Returns:
        200 JSON with the extraction/matching artifacts, 400 on a bad
        request, 500 on any unexpected failure.
    """
    try:
        logger.info("Received request to process PDF.")
        if 'pdf_file' not in request.files:
            logger.warning("No PDF file found in request.")
            return jsonify({"error": "Missing PDF file in form-data with key 'pdf_file'"}), 400

        pdf_file = request.files['pdf_file']
        if pdf_file.filename == '':
            return jsonify({"error": "Empty filename"}), 400

        # ================================================= #
        #    Generate Random UUID for project folder name   #
        # ================================================= #
        random_id = str(uuid.uuid4()).replace('-', '')
        project_folder = os.path.join("outputs", f"project_{random_id}")
        os.makedirs(project_folder, exist_ok=True)

        # Save the uploaded PDF temporarily (sanitized name, private dir).
        filename = secure_filename(pdf_file.filename)
        temp_dir = tempfile.mkdtemp()
        saved_pdf_path = os.path.join(temp_dir, filename)
        pdf_file.save(saved_pdf_path)

        logger.info(f"Saved uploaded PDF to: {saved_pdf_path}")

        # Extract sprite images & metadata from the PDF.
        json_path = None
        output_path, result = extract_images_from_pdf(saved_pdf_path, json_path)

        # Sanity-check that the extraction step produced its JSON artifact.
        extracted_dir = os.path.join(JSON_FOLDER_PATH, os.path.splitext(filename)[0])
        extracted_sprites_json = os.path.join(extracted_dir, "extracted_sprites.json")

        if not os.path.exists(extracted_sprites_json):
            return jsonify({"error": "No extracted_sprites.json found"}), 500

        # Loading validates the file is readable, well-formed JSON.
        with open(extracted_sprites_json, 'r') as f:
            sprite_data = json.load(f)

        project_output = similarity_matching(output_path, project_folder)
        logger.info("Similarity matching completed.")

        # Best-effort page rendering + pseudo-code extraction. Initialize the
        # results first so the response below never hits a NameError when the
        # conversion fails (previously image_paths was undefined on failure).
        image_paths = []
        pseudo_results = []
        try:
            image_paths = await convert_pdf_to_images_async(saved_pdf_path)
            print("PDF converted to images:", image_paths)

            pseudo_results = [get_desc_pseudo(img_path, project_folder) for img_path in image_paths]
        except Exception as e:
            print(f"Error processing PDF: {e}")

        return jsonify({
            "message": "✅ PDF processed successfully",
            "output_json": output_path,
            "sprites": result,
            "project_output_json": project_output,
            "scanned_images": image_paths,
            # "scanned_image_pseudo": pseudo_results
        })
    except Exception as e:
        logger.exception("❌ Failed to process PDF")
        return jsonify({"error": f"❌ Failed to process PDF: {str(e)}"}), 500
|
1398 |
+
|
1399 |
+
if __name__ == '__main__':
    # Dev server entry point; port 7860 is the Hugging Face Spaces default.
    # NOTE(review): debug=True should be disabled in production.
    app.run(host='0.0.0.0', port=7860, debug=True)
|
app_main.py
CHANGED
@@ -1,500 +1,623 @@
|
|
1 |
-
from flask import Flask, render_template, Response, flash, redirect, url_for, request, jsonify
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
from unstructured.partition.pdf import partition_pdf
|
5 |
-
import
|
6 |
-
import
|
7 |
-
import
|
8 |
-
import
|
9 |
-
from
|
10 |
-
from
|
11 |
-
from
|
12 |
-
import
|
13 |
-
from
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
#
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
)
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
""
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
image = Image.
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
image.
|
185 |
-
|
186 |
-
|
187 |
-
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
"
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
}
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
{
|
213 |
-
|
214 |
-
|
215 |
-
|
216 |
-
|
217 |
-
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
{
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
]
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
#
|
235 |
-
#
|
236 |
-
|
237 |
-
#
|
238 |
-
#
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
}
|
246 |
-
|
247 |
-
|
248 |
-
|
249 |
-
|
250 |
-
|
251 |
-
|
252 |
-
|
253 |
-
|
254 |
-
|
255 |
-
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
263 |
-
|
264 |
-
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
|
272 |
-
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
|
289 |
-
|
290 |
-
|
291 |
-
|
292 |
-
|
293 |
-
|
294 |
-
|
295 |
-
|
296 |
-
|
297 |
-
|
298 |
-
|
299 |
-
|
300 |
-
|
301 |
-
|
302 |
-
|
303 |
-
|
304 |
-
|
305 |
-
|
306 |
-
|
307 |
-
|
308 |
-
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
#
|
322 |
-
|
323 |
-
#
|
324 |
-
|
325 |
-
|
326 |
-
|
327 |
-
|
328 |
-
|
329 |
-
|
330 |
-
#
|
331 |
-
#
|
332 |
-
#
|
333 |
-
|
334 |
-
|
335 |
-
|
336 |
-
#
|
337 |
-
|
338 |
-
#
|
339 |
-
|
340 |
-
|
341 |
-
|
342 |
-
|
343 |
-
|
344 |
-
|
345 |
-
|
346 |
-
|
347 |
-
|
348 |
-
|
349 |
-
|
350 |
-
|
351 |
-
|
352 |
-
|
353 |
-
|
354 |
-
|
355 |
-
# ============================== #
|
356 |
-
#
|
357 |
-
# ============================== #
|
358 |
-
|
359 |
-
|
360 |
-
|
361 |
-
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
|
366 |
-
|
367 |
-
|
368 |
-
|
369 |
-
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
|
377 |
-
|
378 |
-
|
379 |
-
|
380 |
-
|
381 |
-
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
|
394 |
-
|
395 |
-
|
396 |
-
|
397 |
-
|
398 |
-
|
399 |
-
|
400 |
-
|
401 |
-
|
402 |
-
|
403 |
-
|
404 |
-
|
405 |
-
|
406 |
-
|
407 |
-
|
408 |
-
|
409 |
-
|
410 |
-
|
411 |
-
|
412 |
-
|
413 |
-
|
414 |
-
|
415 |
-
|
416 |
-
|
417 |
-
|
418 |
-
|
419 |
-
|
420 |
-
|
421 |
-
|
422 |
-
|
423 |
-
|
424 |
-
|
425 |
-
|
426 |
-
|
427 |
-
|
428 |
-
|
429 |
-
|
430 |
-
|
431 |
-
|
432 |
-
|
433 |
-
|
434 |
-
|
435 |
-
|
436 |
-
|
437 |
-
|
438 |
-
|
439 |
-
|
440 |
-
|
441 |
-
|
442 |
-
|
443 |
-
"
|
444 |
-
|
445 |
-
|
446 |
-
|
447 |
-
|
448 |
-
|
449 |
-
|
450 |
-
|
451 |
-
|
452 |
-
|
453 |
-
|
454 |
-
|
455 |
-
|
456 |
-
|
457 |
-
#
|
458 |
-
|
459 |
-
|
460 |
-
|
461 |
-
|
462 |
-
|
463 |
-
|
464 |
-
|
465 |
-
|
466 |
-
|
467 |
-
|
468 |
-
|
469 |
-
|
470 |
-
|
471 |
-
|
472 |
-
|
473 |
-
|
474 |
-
|
475 |
-
|
476 |
-
|
477 |
-
|
478 |
-
|
479 |
-
|
480 |
-
|
481 |
-
|
482 |
-
|
483 |
-
|
484 |
-
|
485 |
-
|
486 |
-
|
487 |
-
|
488 |
-
|
489 |
-
|
490 |
-
|
491 |
-
|
492 |
-
|
493 |
-
|
494 |
-
|
495 |
-
|
496 |
-
|
497 |
-
|
498 |
-
|
499 |
-
|
500 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from flask import Flask, render_template, Response, flash, redirect, url_for, request, jsonify
|
2 |
+
import cv2, json,base64,io,os,tempfile,torch,logging
|
3 |
+
import numpy as np
|
4 |
+
from unstructured.partition.pdf import partition_pdf
|
5 |
+
from PIL import Image, ImageEnhance, ImageDraw
|
6 |
+
from imutils.perspective import four_point_transform
|
7 |
+
from dotenv import load_dotenv
|
8 |
+
import pytesseract
|
9 |
+
from transformers import AutoProcessor, AutoModelForImageTextToText, AutoModelForVision2Seq
|
10 |
+
from langchain_community.document_loaders.image_captions import ImageCaptionLoader
|
11 |
+
from werkzeug.utils import secure_filename
|
12 |
+
from langchain_groq import ChatGroq
|
13 |
+
from langgraph.prebuilt import create_react_agent
|
14 |
+
|
15 |
+
# Configure logging: everything goes to both app.log and stderr.
logging.basicConfig(
    level=logging.DEBUG, # Use INFO or ERROR in production
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.FileHandler("app.log"),
        logging.StreamHandler()
    ]
)

logger = logging.getLogger(__name__)

# Load GROQ_API_KEY (and friends) from a local .env file.
load_dotenv()
# os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")
groq_api_key = os.getenv("GROQ_API_KEY")

# Groq-hosted Llama 4 model used by the captioning agent below.
llm = ChatGroq(
    model="meta-llama/llama-4-maverick-17b-128e-instruct",
    temperature=0,
    max_tokens=None,
)

app = Flask(__name__)

# NOTE(review): hard-coded Windows paths — these will not exist on a Linux
# deployment (e.g. HF Spaces); confirm/override via env vars for production.
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
poppler_path = r"C:\poppler-23.11.0\Library\bin"

count = 0
# NOTE(review): developer-machine sample PDF path; unused outside local runs?
PDF_GET = r"E:\Pratham\2025\Harsh Sir\Scratch Vision\images\scratch_crab.pdf"

# Output directory layout for the pipeline artifacts.
OUTPUT_FOLDER = "OUTPUTS"
DETECTED_IMAGE_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "DETECTED_IMAGE")
IMAGE_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "SCANNED_IMAGE")
JSON_FOLDER_PATH = os.path.join(OUTPUT_FOLDER, "EXTRACTED_JSON")

# Create all output folders up front (idempotent).
for path in [OUTPUT_FOLDER, IMAGE_FOLDER_PATH, DETECTED_IMAGE_FOLDER_PATH, JSON_FOLDER_PATH]:
    os.makedirs(path, exist_ok=True)
|
52 |
+
|
53 |
+
# Model Initialization: load the SmolVLM-256M processor + model once at
# import time; any failure is fatal for the app.
try:
    smolvlm256m_processor = AutoProcessor.from_pretrained(
        "HuggingFaceTB/SmolVLM-256M-Instruct")
    # smolvlm256m_model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM-256M-Instruct").to("cpu")
    # bfloat16 when the torch build supports it, otherwise float32; CPU-only.
    smolvlm256m_model = AutoModelForVision2Seq.from_pretrained(
        "HuggingFaceTB/SmolVLM-256M-Instruct",
        torch_dtype=torch.bfloat16 if hasattr(
            torch, "bfloat16") else torch.float32,
        _attn_implementation="eager"
    ).to("cpu")
except Exception as e:
    raise RuntimeError(f"❌ Failed to load SmolVLM model: {str(e)}")
|
66 |
+
|
67 |
+
# SmolVLM Image Captioning functioning
def get_smolvlm_caption(image: Image.Image, prompt: str = "") -> str:
    """Caption *image* with the module-level SmolVLM model.

    Ensures the prompt carries exactly one ``<image>`` placeholder token,
    then runs a CPU generate pass (max 100 new tokens). Any failure is
    reported as an error string rather than raised.
    """
    try:
        # Prepend the placeholder when the caller omitted it.
        if "<image>" not in prompt:
            prompt = f"<image> {prompt.strip()}"

        num_image_tokens = prompt.count("<image>")
        if num_image_tokens != 1:
            raise ValueError(
                f"Prompt must contain exactly 1 <image> token. Found {num_image_tokens}")

        model_inputs = smolvlm256m_processor(
            images=[image], text=[prompt], return_tensors="pt").to("cpu")
        generated_ids = smolvlm256m_model.generate(**model_inputs, max_new_tokens=100)
        return smolvlm256m_processor.decode(generated_ids[0], skip_special_tokens=True)
    except Exception as e:
        return f"❌ Error during caption generation: {str(e)}"
|
85 |
+
|
86 |
+
# --- FUNCTION: Extract images from saved PDF ---
def extract_images_from_pdf(pdf_path, final_json_path_2):
    """Extract embedded images from a PDF and build structured sprite JSON.

    Pipeline: partition the PDF (unstructured, hi_res) to pull out images as
    base64 payloads; caption/name each image via the Groq LLM agent; write
    all sprites to extracted_sprites.json and a code-block-filtered copy to
    extracted_sprites_2.json under OUTPUTS/EXTRACTED_JSON/<pdf_name>/.

    NOTE(review): the *final_json_path_2* parameter is overwritten below and
    effectively ignored — confirm whether callers expect it to be honoured.

    Returns:
        (final_json_path, manipulated_json) — path of the UNfiltered sprite
        JSON and its dict contents.

    Raises:
        RuntimeError: wrapping any failure in the extraction pipeline.
    """
    try:
        pdf_filename = os.path.splitext(os.path.basename(pdf_path))[
            0]  # e.g., "scratch_crab"
        # NOTE(review): backslash rewrite is Windows-specific.
        pdf_dir_path = os.path.dirname(pdf_path).replace("/", "\\")

        # Create per-PDF subfolders for images and JSON artifacts.
        extracted_image_subdir = os.path.join(
            DETECTED_IMAGE_FOLDER_PATH, pdf_filename)
        json_subdir = os.path.join(JSON_FOLDER_PATH, pdf_filename)
        os.makedirs(extracted_image_subdir, exist_ok=True)
        os.makedirs(json_subdir, exist_ok=True)

        # Output paths (final_json_path_2 parameter is replaced here).
        output_json_path = os.path.join(json_subdir, "extracted.json")
        final_json_path = os.path.join(json_subdir, "extracted_sprites.json")
        final_json_path_2 = os.path.join(json_subdir, "extracted_sprites_2.json")

        try:
            elements = partition_pdf(
                filename=pdf_path,
                strategy="hi_res",
                extract_image_block_types=["Image"],
                extract_image_block_to_payload=True,  # Set to True to get base64 in output
            )
        except Exception as e:
            raise RuntimeError(
                f"❌ Failed to extract images from PDF: {str(e)}")

        try:
            with open(output_json_path, "w") as f:
                json.dump([element.to_dict()
                           for element in elements], f, indent=4)
        except Exception as e:
            raise RuntimeError(f"❌ Failed to write extracted.json: {str(e)}")

        try:
            # Re-read the dump so the loop below works on plain dicts.
            with open(output_json_path, 'r') as file:
                file_elements = json.load(file)
        except Exception as e:
            raise RuntimeError(f"❌ Failed to read extracted.json: {str(e)}")

        # Prepare manipulated sprite JSON structure
        manipulated_json = {}

        # SET A SYSTEM PROMPT (runtime string — typos left as-is on purpose;
        # changing them would change the LLM input).
        system_prompt = """
        You are an expert in visual scene understanding.
        Your Job is to analyze an image and respond acoording if asked for name give simple name by analyzing it and if ask for descrption generate a short description covering its elements.

        Guidelines:
        - Focus only the images given in Square Shape.
        - Don't Consider Blank areas in Image as.
        - Don't include generic summary or explanation outside the fields.
        Return only string.
        """

        # Tool-free ReAct agent used purely for vision captioning.
        agent = create_react_agent(
            model=llm,
            tools=[],
            prompt=system_prompt
        )

        # If JSON already exists, load it and find the next available Sprite number
        if os.path.exists(final_json_path):
            with open(final_json_path, "r") as existing_file:
                manipulated = json.load(existing_file)
            # Determine the next available index (e.g., Sprite 4 if 1–3 already exist)
            existing_keys = [int(k.replace("Sprite ", ""))
                             for k in manipulated.keys()]
            start_count = max(existing_keys, default=0) + 1
        else:
            start_count = 1

        sprite_count = start_count
        for i, element in enumerate(file_elements):
            if "image_base64" in element["metadata"]:
                try:
                    image_data = base64.b64decode(
                        element["metadata"]["image_base64"])
                    image = Image.open(io.BytesIO(image_data)).convert("RGB")
                    # image.show(title=f"Extracted Image {i+1}")
                    image_path = os.path.join(
                        extracted_image_subdir, f"Sprite_{i+1}.png")
                    image.save(image_path)

                    # Re-encode the saved PNG for the data-URL payload.
                    with open(image_path, "rb") as image_file:
                        image_bytes = image_file.read()
                    img_base64 = base64.b64encode(image_bytes).decode("utf-8")

                    # Runtime prompts: code-block images get 'scratch blocks'.
                    prompt_description = "Give a brief Captioning. If any Image include the text or any logical block structure give it name as default 'scratch blocks'"
                    prompt_name = "give a short name caption of this Image. If any Image include the text or any logical block structure give it description as default 'scratch blocks'"

                    # First agent call: description.
                    content1 = [
                        {
                            "type": "text",
                            "text": f"{prompt_description}"
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{img_base64}"
                            }
                        }
                    ]
                    response1 = agent.invoke(
                        {"messages": [{"role": "user", "content": content1}]})
                    # print(response1)
                    description = response1["messages"][-1].content

                    # Second agent call: short name.
                    content2 = [
                        {
                            "type": "text",
                            "text": f"{prompt_name}"
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{img_base64}"
                            }
                        }
                    ]

                    response2 = agent.invoke(
                        {"messages": [{"role": "user", "content": content2}]})
                    # print(response2)
                    name = response2["messages"][-1].content

                    manipulated_json[f"Sprite {sprite_count}"] = {
                        "name": name,
                        "base64": element["metadata"]["image_base64"],
                        "file-path": pdf_dir_path,
                        "description": description
                    }
                    sprite_count += 1
                except Exception as e:
                    # Best-effort per-sprite: skip failures, keep going.
                    print(f"⚠️ Error processing Sprite {i+1}: {str(e)}")

        # Keywords used to detect Scratch code-block screenshots by name.
        scratch_keywords = [
            "move", "turn", "wait", "repeat", "if", "else", "broadcast",
            "glide", "change", "forever", "when", "switch",
            "next costume", "set", "show", "hide", "play sound",
            "go to", "x position", "y position", "think", "say",
            "variable", "stop", "clone",
            "touching", "sensing", "pen", "clear","Scratch","Code","scratch blocks"
        ]

        # Save manipulated JSON (all sprites, unfiltered).
        with open(final_json_path, "w") as sprite_file:
            json.dump(manipulated_json, sprite_file, indent=4)

        def is_code_block(name: str) -> bool:
            # True when any scratch keyword appears (case-insensitive) in name.
            for kw in scratch_keywords:
                if kw.lower() in name.lower():
                    return True
            return False

        # Filter out code block images
        filtered_sprites = {}
        for key, value in manipulated_json.items():
            sprite_name = value.get("name", "")
            if not is_code_block(sprite_name):
                filtered_sprites[key] = value
            else:
                logger.info(f"🛑 Excluded code block-like image: {key}")

        # Write the filtered copy alongside the full one.
        with open(final_json_path_2, "w") as sprite_file:
            json.dump(filtered_sprites, sprite_file, indent=4)
        # print(f"✅ Manipulated sprite JSON saved: {final_json_path}")
        # NOTE(review): returns the UNfiltered json path/dict even though a
        # filtered version was just written — confirm which callers expect.
        return final_json_path, manipulated_json
    except Exception as e:
        raise RuntimeError(f"❌ Error in extract_images_from_pdf: {str(e)}")
|
309 |
+
|
310 |
+
|
311 |
+
def similarity_matching(input_json_path: str) -> str:
    """Match extracted sprite images against local reference assets via CLIP.

    Reads sprite metadata (descriptions + base64-encoded images) from
    *input_json_path*, embeds the sprite images with OpenCLIP, compares them
    against precomputed reference embeddings loaded from
    ``{OUTPUT_FOLDER}/embeddings.json``, copies the matched sprite/backdrop
    asset folders into a fresh ``outputs/project_<uuid>`` folder, and writes
    a merged Scratch 3.0 ``project.json`` there.

    Returns:
        Path to the generated project.json.

    NOTE(review): relies on module-level names ``logger``, ``OUTPUT_FOLDER``,
    ``json``, ``os``, ``np``, ``base64`` and ``Image`` — confirm they exist at
    module scope. Also assumes ``embeddings.json`` was produced beforehand.
    """
    import uuid
    import shutil
    import tempfile
    from langchain_experimental.open_clip.open_clip import OpenCLIPEmbeddings
    # NOTE(review): OffsetImage/AnnotationBbox are imported but never used below.
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox
    from io import BytesIO

    logger.info("🔍 Running similarity matching...")

    # ============================== #
    #         DEFINE PATHS           #
    # ============================== #
    # NOTE(review): hard-coded local Windows paths; the env-var variants below
    # are the deployable alternative — confirm which should be active.
    backdrop_images_path = r"E:\Pratham\2025\Harsh Sir\Scratch Vision\images\Backdrops"
    sprite_images_path = r"E:\Pratham\2025\Harsh Sir\Scratch Vision\images\sprites"
    # backdrop_images_path = os.getenv("BACKDROP_FOLDER_PATH", "/app/reference/backdrops")
    # sprite_images_path = os.getenv("SPRITE_FOLDER_PATH", "/app/reference/sprites")
    image_dirs = [backdrop_images_path, sprite_images_path]

    # ================================================= #
    #   Generate Random UUID for project folder name    #
    # ================================================= #
    random_id = str(uuid.uuid4()).replace('-', '')
    project_folder = os.path.join("outputs", f"project_{random_id}")

    # =========================================================================== #
    #        Create empty json in project_{random_id} folder                      #
    # =========================================================================== #
    os.makedirs(project_folder, exist_ok=True)
    project_json_path = os.path.join(project_folder, "project.json")

    # ============================== #
    #     READ SPRITE METADATA       #
    # ============================== #
    with open(input_json_path, 'r') as f:
        sprites_data = json.load(f)

    # Collect ids, text prompts, and base64 payloads in parallel lists.
    sprite_ids, texts, sprite_base64 = [], [], []
    for sid, sprite in sprites_data.items():
        sprite_ids.append(sid)
        # Fall back to the sprite name when no description was extracted.
        texts.append(
            "This is " + sprite.get("description", sprite.get("name", "")))
        sprite_base64.append(sprite["base64"])

    # ============================== #
    #   INITIALIZE CLIP EMBEDDER     #
    # ============================== #
    clip_embd = OpenCLIPEmbeddings()

    # ========================================= #
    #  Walk folders to collect all image paths  #
    # ========================================= #
    # Order matters: indices into folder_image_paths are matched against the
    # row order of embeddings.json below — both must have been built the same way.
    folder_image_paths = []
    for image_dir in image_dirs:
        for root, _, files in os.walk(image_dir):
            for fname in files:
                if fname.lower().endswith((".png", ".jpg", ".jpeg")):
                    folder_image_paths.append(os.path.join(root, fname))

    # # ============================== #
    # #    EMBED FOLDER IMAGES (REF)   #
    # # ============================== #
    # img_features = clip_embd.embed_image(folder_image_paths)

    # # ============================== #
    # #     Store image embeddings     #
    # # ============================== #
    # embedding_json = []
    # for i, path in enumerate(folder_image_paths):
    #     embedding_json.append({
    #         "name":os.path.basename(path),
    #         "file-path": path,
    #         "embeddings": list(img_features[i])
    #     })

    # # Save to embeddings.json
    # with open(f"{OUTPUT_FOLDER}/embeddings.json", "w") as f:
    #     json.dump(embedding_json, f, indent=2)

    # ============================== #
    #     DECODE SPRITE IMAGES       #
    # ============================== #
    # Sprites arrive as data-URI base64; write each to a temp PNG so the CLIP
    # embedder (which takes file paths) can read them.
    temp_dir = tempfile.mkdtemp()
    sprite_image_paths = []
    for idx, b64 in enumerate(sprite_base64):
        image_data = base64.b64decode(b64.split(",")[-1])
        img = Image.open(BytesIO(image_data)).convert("RGB")
        temp_path = os.path.join(temp_dir, f"sprite_{idx}.png")
        img.save(temp_path)
        sprite_image_paths.append(temp_path)

    # ============================== #
    #      EMBED SPRITE IMAGES       #
    # ============================== #
    sprite_features = clip_embd.embed_image(sprite_image_paths)

    # ============================== #
    #      COMPUTE SIMILARITIES      #
    # ============================== #
    with open(f"{OUTPUT_FOLDER}/embeddings.json", "r") as f:
        embedding_json = json.load(f)
    # print(f"\n\n EMBEDDING JSON: {embedding_json}")

    # Cosine-style similarity via dot product: rows = sprites, cols = references.
    img_matrix = np.array([img["embeddings"] for img in embedding_json])
    sprite_matrix = np.array(sprite_features)

    similarity = np.matmul(sprite_matrix, img_matrix.T)
    # Best reference index per sprite.
    most_similar_indices = np.argmax(similarity, axis=1)

    # ============= Match and copy ===============
    project_data = []
    copied_folders = set()

    # =============================================================== #
    #   Loop through most similar images from Sprites folder          #
    #   → Copy sprite assets (excluding matched image + sprite.json)  #
    #   → Load sprite.json and append its data to project_data        #
    # =============================================================== #
    for sprite_idx, matched_idx in enumerate(most_similar_indices):
        matched_image_path = folder_image_paths[matched_idx]
        matched_image_path = os.path.normpath(matched_image_path)

        matched_folder = os.path.dirname(matched_image_path)
        folder_name = os.path.basename(matched_folder)

        # Each reference folder is copied at most once even if several sprites
        # match images inside it.
        if matched_folder in copied_folders:
            continue
        copied_folders.add(matched_folder)
        logger.info(f"Matched image path: {matched_image_path}")

        sprite_json_path = os.path.join(matched_folder, 'sprite.json')
        if not sprite_json_path or not os.path.exists(sprite_json_path) if False else not os.path.exists(sprite_json_path):
            logger.warning(f"sprite.json not found in: {matched_folder}")
            continue

        with open(sprite_json_path, 'r') as f:
            sprite_data = json.load(f)
        # print(f"SPRITE DATA: \n{sprite_data}")
        # Copy only non-matched files
        for fname in os.listdir(matched_folder):
            fpath = os.path.join(matched_folder, fname)
            if os.path.isfile(fpath) and fname not in {os.path.basename(matched_image_path), 'sprite.json'}:
                shutil.copy2(fpath, os.path.join(project_folder, fname))
                # logger.info(f"Copied Sprite asset: {fname}")
        project_data.append(sprite_data)

    # ================================================================== #
    #   Loop through most similar images from Backdrops folder           #
    #   → Copy Backdrop assets (excluding matched image + project.json)  #
    #   → Load project.json and append its data to project_data          #
    # ================================================================== #
    backdrop_data = []  # for backdrop-related entries

    for backdrop_idx, matched_idx in enumerate(most_similar_indices):
        matched_image_path = os.path.normpath(folder_image_paths[matched_idx])

        # Check if the match is from the Backdrops folder
        if matched_image_path.startswith(os.path.normpath(backdrop_images_path)):
            matched_folder = os.path.dirname(matched_image_path)
            folder_name = os.path.basename(matched_folder)

            logger.info(f"Backdrop matched image: {matched_image_path}")

            # Copy only non-matched files
            for fname in os.listdir(matched_folder):
                fpath = os.path.join(matched_folder, fname)
                if os.path.isfile(fpath) and fname not in {os.path.basename(matched_image_path), 'project.json'}:
                    shutil.copy2(fpath, os.path.join(project_folder, fname))
                    # logger.info(f"Copied Backdrop asset: {fname}")

            # Append backdrop's project.json — only its Stage targets are kept.
            backdrop_json_path = os.path.join(matched_folder, 'project.json')
            if os.path.exists(backdrop_json_path):
                with open(backdrop_json_path, 'r') as f:
                    backdrop_json_data = json.load(f)
                # print(f"SPRITE DATA: \n{backdrop_json_data}")
                if "targets" in backdrop_json_data:
                    for target in backdrop_json_data["targets"]:
                        if target.get("isStage") == True:
                            backdrop_data.append(target)
            else:
                logger.warning(f"project.json not found in: {matched_folder}")

    '''
    project_data, backdrop_data = [], []
    copied_folders = set()
    for sprite_idx, matched_idx in enumerate(most_similar_indices):
        matched_entry = folder_image_paths[matched_idx]
        # matched_image_path = os.path.normpath(folder_image_paths[matched_idx])
        matched_image_path = os.path.normpath(matched_entry["file-path"])
        matched_folder = os.path.dirname(matched_image_path)
        if matched_folder in copied_folders:
            continue
        copied_folders.add(matched_folder)

        # Sprite
        sprite_json_path = os.path.join(matched_folder, 'sprite.json')
        if os.path.exists(sprite_json_path):
            with open(sprite_json_path, 'r') as f:
                sprite_data = json.load(f)
            project_data.append(sprite_data)

            for fname in os.listdir(matched_folder):
                if fname not in {os.path.basename(matched_image_path), 'sprite.json'}:
                    shutil.copy2(os.path.join(
                        matched_folder, fname), project_folder)

        # Backdrop
        if matched_image_path.startswith(os.path.normpath(backdrop_images_path)):
            backdrop_json_path = os.path.join(matched_folder, 'project.json')
            if os.path.exists(backdrop_json_path):
                with open(backdrop_json_path, 'r') as f:
                    backdrop_json_data = json.load(f)
                for target in backdrop_json_data.get("targets", []):
                    if target.get("isStage"):
                        backdrop_data.append(target)
                for fname in os.listdir(matched_folder):
                    if fname not in {os.path.basename(matched_image_path), 'project.json'}:
                        shutil.copy2(os.path.join(
                            matched_folder, fname), project_folder)'''

    # Merge JSON structure
    final_project = {
        "targets": [],
        "monitors": [],
        "extensions": [],
        "meta": {
            "semver": "3.0.0",
            "vm": "11.3.0",
            "agent": "OpenAI ScratchVision Agent"
        }
    }

    # Non-stage sprites go in as-is.
    for sprite in project_data:
        if not sprite.get("isStage", False):
            final_project["targets"].append(sprite)

    # Collapse all matched backdrops into a single Stage target; sounds are
    # taken from the first backdrop only.
    if backdrop_data:
        all_costumes, sounds = [], []
        for idx, bd in enumerate(backdrop_data):
            all_costumes.extend(bd.get("costumes", []))
            if idx == 0 and "sounds" in bd:
                sounds = bd["sounds"]
        final_project["targets"].append({
            "isStage": True,
            "name": "Stage",
            "variables": {},
            "lists": {},
            "broadcasts": {},
            "blocks": {},
            "comments": {},
            "currentCostume": 1 if len(all_costumes) > 1 else 0,
            "costumes": all_costumes,
            "sounds": sounds,
            "volume": 100,
            "layerOrder": 0,
            "tempo": 60,
            "videoTransparency": 50,
            "videoState": "on",
            "textToSpeechLanguage": None
        })

    with open(project_json_path, 'w') as f:
        json.dump(final_project, f, indent=2)

    # logger.info(f"🎉 Final project saved: {project_json_path}")
    return project_json_path
578 |
+
|
579 |
+
@app.route('/')
def index():
    """Serve the application's main upload page."""
    template_name = 'app_index.html'
    return render_template(template_name)
|
582 |
+
|
583 |
+
# API endpoint
@app.route('/process_pdf', methods=['POST'])
def process_pdf():
    """Handle an uploaded PDF: extract sprite images and build a Scratch project.

    Expects a multipart form upload with key ``pdf_file``. The PDF is saved to
    a temporary directory, sprite images are extracted, and similarity matching
    produces the final project.json.

    Returns:
        200 JSON with the output/sprite/project paths on success,
        400 on a missing/empty upload, 500 on any processing failure.
    """
    try:
        logger.info("Received request to process PDF.")
        if 'pdf_file' not in request.files:
            logger.warning("No PDF file found in request.")
            return jsonify({"error": "Missing PDF file in form-data with key 'pdf_file'"}), 400

        pdf_file = request.files['pdf_file']
        if pdf_file.filename == '':
            return jsonify({"error": "Empty filename"}), 400

        # Save the uploaded PDF temporarily; secure_filename guards against
        # path-traversal in the client-supplied name.
        filename = secure_filename(pdf_file.filename)
        temp_dir = tempfile.mkdtemp()
        saved_pdf_path = os.path.join(temp_dir, filename)
        pdf_file.save(saved_pdf_path)

        logger.info(f"Saved uploaded PDF to: {saved_pdf_path}")

        # Extract sprite images/metadata, then match them against reference assets.
        json_path = None
        output_path, result = extract_images_from_pdf(
            saved_pdf_path, json_path)

        project_output = similarity_matching(output_path)
        # Fixed: previously logged a duplicated "Received request to process
        # PDF." here, which made logs look like a second request arrived.
        logger.info("PDF processing completed.")

        return jsonify({
            "message": "✅ PDF processed successfully",
            "output_json": output_path,
            "sprites": result,
            "project_output_json": project_output
        })
    except Exception as e:
        logger.exception("❌ Failed to process PDF")
        return jsonify({"error": f"❌ Failed to process PDF: {str(e)}"}), 500
|
621 |
+
|
622 |
+
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/auto-reloader —
    # fine for local development, but it must be disabled in any public
    # deployment (arbitrary code execution via the debug console).
    app.run(host='0.0.0.0', port=7860, debug=True)
|
assets_manipulate.py
ADDED
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import zipfile
|
3 |
+
import cairosvg
|
4 |
+
import json
|
5 |
+
|
6 |
+
def add_zip_suffix_to_all(folder_path):
    """Rename every regular file in *folder_path* by appending a '.zip' suffix.

    Files already ending in '.zip' and subdirectories are left untouched.

    Args:
        folder_path: Directory whose immediate files should be renamed.
    """
    for filename in os.listdir(folder_path):
        file_path = os.path.join(folder_path, filename)

        if os.path.isfile(file_path) and not filename.endswith(".zip"):
            new_filename = filename + ".zip"
            new_file_path = os.path.join(folder_path, new_filename)

            os.rename(file_path, new_file_path)
            # Fixed: the message previously printed a garbled placeholder
            # instead of the original filename.
            print(f"Renamed: {filename} -> {new_filename}")
|
19 |
+
|
20 |
+
def extract_all_zip_files(folder_path):
    """Extract all '.zip' files in *folder_path* into same-named subfolders.

    Each ``x.zip`` is extracted to ``x/``; extraction is skipped when the
    target folder already exists and is non-empty.

    Args:
        folder_path: Directory to scan for '.zip' archives.

    Returns:
        List of extraction directories (both fresh and pre-existing).
    """
    extracted_dirs = []
    for filename in os.listdir(folder_path):
        if filename.endswith(".zip"):
            file_path = os.path.join(folder_path, filename)
            extract_dir = os.path.join(folder_path, filename[:-4])  # remove .zip

            # Skip if already extracted
            if os.path.exists(extract_dir) and os.listdir(extract_dir):
                # Fixed: the message previously printed a garbled placeholder
                # instead of the archive filename.
                print(f"Skipping: {filename} (already extracted at {extract_dir}/)")
                extracted_dirs.append(extract_dir)
                continue

            os.makedirs(extract_dir, exist_ok=True)

            with zipfile.ZipFile(file_path, 'r') as zip_ref:
                zip_ref.extractall(extract_dir)

            print(f"Extracted: {filename} -> {extract_dir}/")
            extracted_dirs.append(extract_dir)

    return extracted_dirs
|
48 |
+
|
49 |
+
def convert_svgs_to_pngs(folder_path, delete_original=False):
    """Recursively convert every .svg under *folder_path* to a sibling .png.

    A conversion is skipped when the target .png already exists. When
    *delete_original* is True, the source .svg is removed after a successful
    conversion. Conversion failures are reported but never raised.
    """
    for dirpath, _, filenames in os.walk(folder_path):
        for name in filenames:
            if not name.endswith(".svg"):
                continue
            svg_path = os.path.join(dirpath, name)
            png_path = os.path.splitext(svg_path)[0] + ".png"

            # Skip if PNG already exists
            if os.path.exists(png_path):
                print(f"Skipping: {svg_path} (PNG already exists)")
                continue

            # Convert svg to png; report (not raise) on failure.
            try:
                cairosvg.svg2png(url=svg_path, write_to=png_path)
                print(f"Converted: {svg_path} -> {png_path}")
                if delete_original:
                    os.remove(svg_path)
                    print(f"Deleted original SVG: {svg_path}")
            except Exception as e:
                print(f"⚠️ Failed to convert {svg_path}: {e}")
|
78 |
+
def update_sprite_json(folder_path):
    """Insert 'objName' and 'layerOrder' into sprite.json, right after 'name'.

    'objName' copies the value of 'name'; 'layerOrder' is set to 0. All other
    keys keep their original order. No-op when sprite.json is missing, lacks
    a 'name' key, or already contains either new key.
    """
    sprite_json_path = os.path.join(folder_path, "sprite.json")
    if not os.path.exists(sprite_json_path):
        return
    try:
        with open(sprite_json_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        if "name" in data:
            if "objName" in data or "layerOrder" in data:
                print(f"Skipping sprite.json in {folder_path} (objName/layerOrder already exists)")
            else:
                # Rebuild the dict, splicing the two new keys in after 'name'
                # so the on-disk key order stays stable.
                reordered = {}
                for key, value in data.items():
                    reordered[key] = value
                    if key == "name":
                        reordered["objName"] = value
                        reordered["layerOrder"] = 0

                with open(sprite_json_path, "w", encoding="utf-8") as f:
                    json.dump(reordered, f, indent=4, ensure_ascii=False)
                print(f"Updated sprite.json in {folder_path} (added objName & layerOrder)")
    except Exception as e:
        print(f"⚠️ Failed to update sprite.json in {folder_path}: {e}")
|
106 |
+
|
107 |
+
# Example usage:
if __name__ == "__main__":
    folder = r"E:\Pratham\2025\Harsh Sir\Scratch Vision\assets\Backdrops"

    # Step 1: Add .zip suffix to files
    add_zip_suffix_to_all(folder)

    # Step 2: Extract all .zip files (skip if already extracted)
    extracted_folders = extract_all_zip_files(folder)

    # Step 3: Convert .svg to .png in each extracted folder (skip if .png already exists)
    for extracted in extracted_folders:
        convert_svgs_to_pngs(extracted, delete_original=False)

    # Step 4: Update sprite.json (add objName & layerOrder).
    # Fixed: previously called update_sprite_json(extracted) once, using the
    # stale loop variable from Step 3, so only the LAST folder was updated.
    for extracted in extracted_folders:
        update_sprite_json(extracted)
|
openclip_embeddings.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
test_app.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|