# Codeez / app.py
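"""Streamlit app that turns pasted Python code into an AR/VR visualization:
it parses the code into a function-call graph, summarizes the graph with a
transformers summarization pipeline, narrates the summary via gTTS, renders
the graph as an interactive A-Frame 3D scene, and shows a QR code for opening
the Space on a mobile device."""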
import streamlit as st
import ast
import json
import base64
import streamlit.components.v1 as components
from transformers import pipeline
from gtts import gTTS
import qrcode
from io import BytesIO

# Page config
st.set_page_config(page_title="AR/VR Code Visualizer", layout="wide")
st.title("πŸ‘“ AR/VR Code Visualizer with Editing, Interaction, and Export")

@st.cache_resource
def load_model():
    # Cache the summarization pipeline so the model is only loaded once.
    return pipeline("summarization", model="philschmid/bart-large-cnn-samsum")

summarizer = load_model()

# Code input area
st.subheader("📝 Write or Paste Your Python Code")
code = st.text_area("Enter your Python code here", height=300)

if code.strip():
    st.code(code, language="python")

    # Parse code to extract functions and their calls
    try:
        tree = ast.parse(code)
    except SyntaxError as exc:
        st.error(f"Could not parse the code: {exc}")
        st.stop()

    class FunctionCallVisitor(ast.NodeVisitor):
        """Maps each function definition to the names of the functions it calls."""

        def __init__(self):
            self.calls = {}

        def visit_FunctionDef(self, node):
            caller = node.name
            self.calls[caller] = []
            # Record direct calls to plain names (e.g. foo()) made inside this function.
            for child in ast.walk(node):
                if isinstance(child, ast.Call) and isinstance(child.func, ast.Name):
                    self.calls[caller].append(child.func.id)
            self.generic_visit(node)

    visitor = FunctionCallVisitor()
    visitor.visit(tree)
    call_graph = visitor.calls
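
    # Example: for the input "def a(): b()" and "def b(): pass",
    # call_graph == {"a": ["b"], "b": []}.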

    # Display call relationships
    st.subheader("📊 Function Calls")
    for fn, callees in call_graph.items():
        st.write(f"🔹 `{fn}` calls: {', '.join(callees) if callees else 'None'}")

    # Generate AI summary
    prompt = f"Explain the structure and purpose of the following functions and how they call each other: {call_graph}"
    summary = summarizer(prompt, max_length=60, min_length=15, do_sample=False)
    summary_text = summary[0]['summary_text']
    st.success(summary_text)

    # Voice narration using gTTS
    st.subheader("🔊 Voice Narration")
    tts = gTTS(text=summary_text)
    tts.save("summary.mp3")
    st.audio("summary.mp3", format="audio/mp3")

    # Generate A-Frame HTML scene
    def generate_aframe(call_graph):
        # Lay the functions out in a row and record caller -> callee edges.
        function_data = {
            "functions": [],
            "relationships": []
        }
        function_positions = {}
        spacing = 3
        for i, fn in enumerate(call_graph):
            x = i * spacing
            function_positions[fn] = (x, 1, -3)
            function_data["functions"].append({"name": fn, "position": [x, 1, -3]})
        for caller, callees in call_graph.items():
            for callee in callees:
                if callee in function_positions:
                    x1, y1, z1 = function_positions[caller]
                    x2, y2, z2 = function_positions[callee]
                    function_data["relationships"].append({"start": [x1, y1, z1], "end": [x2, y2, z2]})
js = """
<script>
const functionData = """ + str(function_data).replace("'", '"') + """;
functionData.functions.forEach(function(fn) {
const el = document.createElement('a-box');
el.setAttribute('id', fn.name);
el.setAttribute('position', fn.position.join(' '));
el.setAttribute('depth', '0.5');
el.setAttribute('height', '0.5');
el.setAttribute('width', '2');
el.setAttribute('color', '#FFC65D');
el.setAttribute('class', 'clickable');
el.setAttribute('event-set__enter', '_event: mouseenter; color: #00CED1');
el.setAttribute('event-set__leave', '_event: mouseleave; color: #FFC65D');
el.setAttribute('onclick', `say('${fn.name}')`);
document.querySelector('a-scene').appendChild(el);
const text = document.createElement('a-text');
text.setAttribute('value', fn.name);
text.setAttribute('position', `${fn.position[0]} ${fn.position[1] + 1} ${fn.position[2]}`);
text.setAttribute('align', 'center');
text.setAttribute('color', '#000');
document.querySelector('a-scene').appendChild(text);
});
functionData.relationships.forEach(function(rel) {
const el = document.createElement('a-entity');
el.setAttribute('line', `start: ${rel.start.join(' ')}; end: ${rel.end.join(' ')}; color: red`);
document.querySelector('a-scene').appendChild(el);
});
function say(text) {
const utter = new SpeechSynthesisUtterance(text);
speechSynthesis.speak(utter);
}
</script>
"""
html = f"""
<!DOCTYPE html>
<html>
<head>
<script src="https://aframe.io/releases/1.3.0/aframe.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/donmccurdy/[email protected]/dist/aframe-extras.min.js"></script>
<script src="https://unpkg.com/[email protected]/dist/aframe-screenshot-component.min.js"></script>
{js}
</head>
<body>
<a-scene screenshot>
<a-entity position="0 1.6 3">
<a-camera wasd-controls-enabled="true" look-controls-enabled="true"></a-camera>
</a-entity>
<a-light type="ambient" color="#FFF"></a-light>
<a-plane rotation="-90 0 0" width="40" height="40" color="#7BC8A4"></a-plane>
</a-scene>
</body>
</html>
"""
return html
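
    # For the example call graph {"a": ["b"], "b": []} above, the embedded data is:
    #   {"functions": [{"name": "a", "position": [0, 1, -3]},
    #                  {"name": "b", "position": [3, 1, -3]}],
    #    "relationships": [{"start": [0, 1, -3], "end": [3, 1, -3]}]}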
    aframe_html = generate_aframe(call_graph)
    b64 = base64.b64encode(aframe_html.encode()).decode()
    data_url = f"data:text/html;base64,{b64}"

    st.subheader("🌐 Interactive 3D Function Visualizer")
    components.iframe(data_url, height=600)

    # Generate and display QR code for AR/VR view
    st.subheader("📱 AR View on Mobile")
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    space_url = "https://huggingface.co/spaces/your-space-name"  # Replace with actual Hugging Face Space URL
    qr.add_data(space_url)
    qr.make(fit=True)
    img = qr.make_image(fill_color='black', back_color='white')

    img_byte_arr = BytesIO()
    img.save(img_byte_arr, format='PNG')
    img_bytes = img_byte_arr.getvalue()
    st.image(img_bytes, caption="Scan this QR code to view the VR scene in AR on your mobile!")
else:
st.info("Write some Python code above to visualize, narrate, and explore it in 3D/AR.")