import streamlit as st
import ast
import base64
import streamlit.components.v1 as components
from transformers import pipeline
from gtts import gTTS
import os

st.set_page_config(page_title="AR/VR Code Visualizer", layout="wide")
st.title("AR/VR Code Visualizer with Editing, Interaction, and Export")
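
# Load the Hugging Face summarization pipeline (BART fine-tuned on the SAMSum dialogue dataset).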
@st.cache_resource  # cache the pipeline so it is loaded once per session, not on every rerun
def load_model():
    return pipeline("summarization", model="philschmid/bart-large-cnn-samsum")

summarizer = load_model()

# In-browser code editor
st.subheader("Write or Paste Your Python Code")
code = st.text_area("Enter your Python code here", height=300)

if code.strip():
    st.code(code, language="python")

    # Parse AST for functions and calls
    try:
        tree = ast.parse(code)
    except SyntaxError as e:
        st.error(f"Could not parse the code: {e}")
        st.stop()
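
    # FunctionCallVisitor records, for each function definition, the simple-name
    # calls made inside its body; attribute/method calls (obj.method()) are ignored.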
    class FunctionCallVisitor(ast.NodeVisitor):
        def __init__(self):
            self.calls = {}

        def visit_FunctionDef(self, node):
            caller = node.name
            self.calls[caller] = []
            for child in ast.walk(node):
                if isinstance(child, ast.Call) and isinstance(child.func, ast.Name):
                    self.calls[caller].append(child.func.id)
            self.generic_visit(node)

    visitor = FunctionCallVisitor()
    visitor.visit(tree)
    call_graph = visitor.calls
    all_functions = list(call_graph.keys())
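
    # List each function alongside the functions it calls directly.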
    st.subheader("Function Calls")
    for fn, callees in call_graph.items():
        st.write(f"`{fn}` calls: {', '.join(callees) if callees else 'None'}")

    # Generate AI summary
    prompt = f"Explain the structure and purpose of the following functions and how they call each other: {call_graph}"
    summary = summarizer(prompt, max_length=60, min_length=15, do_sample=False)
    summary_text = summary[0]['summary_text']
    st.success(summary_text)

    # Generate voice narration
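    # gTTS sends the summary text to Google's text-to-speech service (network access
    # required) and saves the result as an MP3 that Streamlit plays back inline.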
    st.subheader("Voice Narration")
    tts = gTTS(text=summary_text)
    tts.save("summary.mp3")
    st.audio("summary.mp3", format="audio/mp3")

    # A-Frame VR scene with interactivity and export
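    # generate_aframe builds a standalone A-Frame HTML page: one labelled box per
    # function and a red line per caller->callee edge, with click-to-speak narration
    # and a screenshot button (the hover colour change relies on A-Frame's event-set
    # component being available in the page).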
    def generate_aframe(call_graph):
        spacing = 3
        boxes = []
        lines = []
        positions = []
        function_positions = {}
        i = 0
        for fn in call_graph:
            x = i * spacing
            function_positions[fn] = (x, 1, -3)
            positions.append(fn)
            boxes.append(f"""
            <a-box id="{fn}" position="{x} 1 -3" depth="0.5" height="0.5" width="2" color="#FFC65D"
                   class="clickable"
                   event-set__enter="_event: mouseenter; color: #00CED1"
                   event-set__leave="_event: mouseleave; color: #FFC65D"
                   onclick="say('{fn}')">
            </a-box>
            <a-text value="{fn}" position="{x} 2 -3" align="center" color="#000"></a-text>
            """)
            i += 1
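
        # Draw a red connecting line for every caller->callee pair whose callee
        # was itself defined in the submitted code.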
        for caller, callees in call_graph.items():
            for callee in callees:
                if callee in function_positions:
                    x1, y1, z1 = function_positions[caller]
                    x2, y2, z2 = function_positions[callee]
                    lines.append(f"""
                    <a-entity line="start: {x1} {y1} {z1}; end: {x2} {y2} {z2}; color: red"></a-entity>
                    """)

        # JavaScript for text-to-speech on click + screenshot
        js = """
        <script>
        function say(text) {
            const utter = new SpeechSynthesisUtterance(text);
            speechSynthesis.speak(utter);
        }
        function downloadScreenshot() {
            const scene = document.querySelector('a-scene');
            scene.components.screenshot.capture('perspective');
            setTimeout(() => {
                const a = document.createElement('a');
                a.href = scene.components.screenshot.canvas.toDataURL();
                a.download = 'scene.png';
                a.click();
            }, 500);
        }
        </script>
        """
html = f""" | |
<!DOCTYPE html> | |
<html> | |
<head> | |
<script src="https://aframe.io/releases/1.3.0/aframe.min.js"></script> | |
<script src="https://cdn.jsdelivr.net/gh/donmccurdy/[email protected]/dist/aframe-extras.min.js"></script> | |
<script src="https://unpkg.com/[email protected]/dist/aframe-screenshot-component.min.js"></script> | |
{js} | |
</head> | |
<body> | |
<button onclick="downloadScreenshot()" style="position: absolute; z-index: 9999; top: 10px; left: 10px; font-size: 16px;">πΈ Download Screenshot</button> | |
<a-scene screenshot> | |
<a-entity position="0 1.6 3"> | |
<a-camera wasd-controls-enabled="true" look-controls-enabled="true"></a-camera> | |
</a-entity> | |
<a-light type="ambient" color="#FFF"></a-light> | |
<a-plane rotation="-90 0 0" width="40" height="40" color="#7BC8A4"></a-plane> | |
{''.join(boxes)} | |
{''.join(lines)} | |
</a-scene> | |
</body> | |
</html> | |
""" | |
return html | |
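
    # Embed the generated page as a base64-encoded data: URL in an iframe so the
    # VR scene runs inside the Streamlit app.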
    aframe_html = generate_aframe(call_graph)
    b64 = base64.b64encode(aframe_html.encode()).decode()
    data_url = f"data:text/html;base64,{b64}"

    st.subheader("Interactive 3D Function Visualizer")
    components.iframe(data_url, height=600)
else:
    st.info("Write some Python code above to visualize, narrate, and explore it in VR.")