import streamlit as st
import streamlit.components.v1 as components
import mido
from queue import Queue

# Inbound hardware MIDI is stored in a queue and dispatched to the browser.
# The Mido callback runs on a background thread that cannot safely touch
# st.session_state, so the queue itself lives at module level (shared by the
# whole process in this demo) and is merely exposed through session state for
# the script thread.
INCOMING_MIDI = Queue()

if 'incoming_midi' not in st.session_state:
    st.session_state.incoming_midi = INCOMING_MIDI


# A callback for Mido hardware input:
def midi_in_callback(msg):
    """
    Called on a background thread when hardware MIDI messages arrive.
    Forwards note_on / note_off events to the module-level queue.
    """
    if msg.type in ('note_on', 'note_off'):
        event_type = 'noteOn' if msg.type == 'note_on' else 'noteOff'
        INCOMING_MIDI.put({
            'type': event_type,
            'note': msg.note,
            'velocity': msg.velocity
        })


def open_midi_input(port_name: str):
    if port_name == "None":
        return None
    return mido.open_input(port_name, callback=midi_in_callback)


def open_midi_output(port_name: str):
    if port_name == "None":
        return None
    return mido.open_output(port_name)
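

# Side note: the callback path above can be sanity-checked outside Streamlit.
# A minimal sketch (assumes at least one hardware input is attached; this
# helper name is ours and is not used by the app itself):
def debug_print_midi_input(seconds: float = 10.0):
    """Print raw messages from the first available MIDI input for a while."""
    import time
    names = mido.get_input_names()
    if not names:
        print("No MIDI inputs found")
        return
    with mido.open_input(names[0], callback=lambda m: print("got", m)) as port:
        print(f"Listening on {port.name} for {seconds}s ...")
        time.sleep(seconds)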


def main():
    st.title("Browser-based Synth with Tone.js (5-Octave + 16 Pads)")
    st.write("""
    This demo uses **Tone.js** in the browser to produce audio, so no ALSA or JACK
    is required on the server side. Hardware MIDI input is captured by Python
    (Mido) and forwarded to the browser so Tone.js can play it.
    """)

    # 1. Let the user pick hardware MIDI input/output ports.
    in_ports = ["None"] + mido.get_input_names()
    out_ports = ["None"] + mido.get_output_names()
    in_choice = st.selectbox("MIDI Input Port", in_ports)
    out_choice = st.selectbox("MIDI Output Port", out_ports)

    # Keep references to the open ports in session state.
    if 'midi_in' not in st.session_state:
        st.session_state.midi_in = None
    if 'midi_out' not in st.session_state:
        st.session_state.midi_out = None

    # If the selection changed, close the old port and open the new one.
    if st.session_state.midi_in and st.session_state.midi_in.name != in_choice:
        st.session_state.midi_in.close()
        st.session_state.midi_in = open_midi_input(in_choice)
    elif (not st.session_state.midi_in) and in_choice != "None":
        st.session_state.midi_in = open_midi_input(in_choice)

    if st.session_state.midi_out and st.session_state.midi_out.name != out_choice:
        st.session_state.midi_out.close()
        st.session_state.midi_out = open_midi_output(out_choice)
    elif (not st.session_state.midi_out) and out_choice != "None":
        st.session_state.midi_out = open_midi_output(out_choice)

    # 2. Embed a large HTML block with Tone.js for the UI & sound. It will:
    #    - create a poly synth for the 5-octave keys
    #    - create a sampler for the 16 drum pads
    #    - optionally provide an arpeggiator
    #    - post "noteOn"/"noteOff"/"drum" messages back toward Python
    #    - listen for messages from Python to trigger notes
    #    For brevity, the markup lives in get_tone_html() below.
    tone_html = get_tone_html()

    # 3. Insert the custom HTML component, with enough vertical space (~600 px)
    #    for the layout.
    components.html(tone_html, height=600, scrolling=True)

    # 4. Poll for inbound hardware MIDI messages from st.session_state.incoming_midi
    #    and queue them for the browser via st.session_state.js_events.
    if 'js_events' not in st.session_state:
        # Events destined for the browser.
        st.session_state.js_events = Queue()

    # Dispatch note events to the hardware MIDI output, if one is open.
    def send_to_midi_out(evt):
        if st.session_state.midi_out is not None:
            from mido import Message
            if evt['type'] == 'noteOn':
                msg = Message('note_on', note=evt['note'], velocity=evt['velocity'])
                st.session_state.midi_out.send(msg)
            elif evt['type'] == 'noteOff':
                msg = Message('note_off', note=evt['note'], velocity=0)
                st.session_state.midi_out.send(msg)
            # Drum events are up to you; see the drum_event_to_message() sketch
            # after main() for one possible mapping.

    # 5. A small polling pass to pick up new hardware MIDI events.
    def poll_hardware_midi():
        while not st.session_state.incoming_midi.empty():
            evt = st.session_state.incoming_midi.get_nowait()
            # Forward to the browser.
            st.session_state.js_events.put(evt)

    poll_hardware_midi()

    # 6. A placeholder to show events coming from the browser.
    debug_placeholder = st.empty()

    # 7. Hidden HTML snippet intended to route browser postMessage -> Streamlit.
    #    Caveat: components.html() is write-only, so this relay alone cannot push
    #    data into Python; a bidirectional custom component is needed for that
    #    (see make_bidirectional_component() at the bottom of the file).
    components.html("""
    <script>
    // This script listens for postMessage from the Tone.js UI
    // ("window.parent.postMessage(...)"), extracts the payload, and re-posts it
    // toward the Streamlit host page using the "type: 'streamlit:message'" shape.
    window.addEventListener('message', function(e) {
        if (e.data && e.data.type === 'toneEvent') {
            // forward to the host page
            window.parent.postMessage({
                type: 'streamlit:message',
                data: {
                    eventType: e.data.eventType,
                    note: e.data.note,
                    velocity: e.data.velocity,
                    padIndex: e.data.padIndex
                }
            }, '*');
        }
    });
    </script>
    """, height=0)

    # 8. Handle inbound "toneEvent" messages from the browser (user clicks on the
    #    on-screen keys/pads), optionally forwarding them to hardware MIDI out.
    #    With plain components.html() this deque stays empty; it is only filled
    #    if a bidirectional component feeds it.
    if 'incoming_browser' not in st.session_state:
        from collections import deque
        st.session_state.incoming_browser = deque()

    def handle_browser_event(e):
        # e = {eventType, note, velocity, padIndex}
        debug_placeholder.write(f"Browser event: {e}")
        # If it's a noteOn / noteOff, optionally forward to MIDI out.
        if e['eventType'] in ['noteOn', 'noteOff']:
            send_to_midi_out({
                'type': e['eventType'],
                'note': e['note'],
                'velocity': e['velocity']
            })
        elif e['eventType'] == 'drum':
            # Could also be forwarded to a hardware sampler via MIDI out
            # (e.g. as a note_on on the drum channel).
            pass

    # 9. A second polling pass over st.session_state.incoming_browser.
    while st.session_state.incoming_browser:
        ev = st.session_state.incoming_browser.popleft()
        handle_browser_event(ev)

    # 10. Finally, if there are events queued for the browser (from hardware),
    #     embed them in a <script> tag so the Tone.js page sees them. The
    #     simplest approach is a small dynamic script, emitted on each run,
    #     that posts all queued events.
    js_payload = []
    while not st.session_state.js_events.empty():
        event = st.session_state.js_events.get_nowait()
        js_payload.append(event)

    if js_payload:
        # Each components.html() call renders in its own iframe, so a plain
        # window.postMessage here would only reach this helper frame. Broadcast
        # through the parent to every sibling frame so the Tone.js iframe
        # receives the events.
        script_code = "<script>\n"
        for evt in js_payload:
            script_code += f"""
            for (let i = 0; i < window.parent.frames.length; i++) {{
                window.parent.frames[i].postMessage({{
                    type: 'hardwareMidi',
                    data: {{
                        eventType: '{evt['type']}',
                        note: {evt['note']},
                        velocity: {evt.get('velocity', 100)}
                    }}
                }}, '*');
            }}
            """
        script_code += "\n</script>\n"
        components.html(script_code, height=0)
st.write("Press keys on the on-screen 5-octave keyboard or 16 pads in the browser UI. " | |
"If a MIDI Input is selected, hardware note_on/off will also trigger Tone.js. " | |
"If a MIDI Output is selected, on-screen or inbound hardware events can be echoed back out.") | |

    # 11. Streamlit has no public "on session end" hook (there is no
    #     st.on_session_end), so as a best effort close the ports when the server
    #     process exits. Stale ports are also closed above whenever the user
    #     switches the selection.
    def cleanup(midi_in=st.session_state.midi_in, midi_out=st.session_state.midi_out):
        if midi_in:
            midi_in.close()
        if midi_out:
            midi_out.close()

    import atexit
    atexit.register(cleanup)
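

# The browser's "drum" events are intentionally left unhandled above. One option
# (a sketch only, not wired into main(); the helper name and the General MIDI
# note choices are ours) is to map the 16 pads onto GM percussion notes on
# channel 10 (index 9 in mido) and echo them to the MIDI output:
GM_PAD_NOTES = [36, 38, 42, 46] * 4  # kick, snare, closed hi-hat, open hi-hat


def drum_event_to_message(evt):
    """Translate a browser 'drum' event into a note_on on the GM drum channel."""
    from mido import Message
    note = GM_PAD_NOTES[evt['padIndex'] % len(GM_PAD_NOTES)]
    return Message('note_on', channel=9, note=note, velocity=evt.get('velocity', 100))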


def get_tone_html():
    """
    Returns an HTML/JS string that:
      - loads Tone.js
      - builds a 5-octave keyboard plus a 16-pad drum grid
      - sets up an optional arpeggiator hook
      - listens for 'hardwareMidi' messages from Python
      - uses postMessage to send 'toneEvent' messages toward Python
    For brevity everything is inline; in production, put it in a separate .html file.
    """
    return r"""
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8"/>
  <script src="https://cdn.jsdelivr.net/npm/[email protected]/build/Tone.js"></script>
  <style>
    body { font-family: sans-serif; margin: 10px; }
    .keyboard-container, .drumpad-container { margin-bottom: 20px; }
    .key { display: inline-block; width: 30px; height: 120px; margin: 1px;
           background: #fff; border: 1px solid #666; cursor: pointer; }
    .key.black { width: 20px; height: 80px; background: #000; position: relative;
                 margin-left: -10px; margin-right: -10px; z-index: 2; }
    .key.active { background: #ff6961 !important; }
    .drumpad-grid { display: grid; grid-template-columns: repeat(4, 60px); gap: 10px; }
    .drumpad { width: 60px; height: 60px; background: #666; color: #fff;
               display: flex; align-items: center; justify-content: center;
               cursor: pointer; border-radius: 5px; }
    .drumpad.active { background: #ff6961; }
  </style>
</head>
<body>
  <h2>Browser Synth with Tone.js</h2>
  <div>
    <p>All audio is generated client-side.
       Python just handles hardware MIDI and forwards it here.</p>
  </div>

  <!-- Keyboard -->
  <div class="keyboard-container" id="keyboard"></div>

  <!-- Drum pads -->
  <h3>Drum Pads</h3>
  <div class="drumpad-container">
    <div class="drumpad-grid" id="drumpads"></div>
  </div>

  <script>
    // ======================================================
    // 1) Create Tone.js instruments
    //    - a poly synth for the melodic keys
    //    - a sampler loaded with a few drum samples
    // ======================================================
    const synth = new Tone.PolySynth(Tone.Synth, {
      oscillator: { type: 'triangle' },
      envelope: { attack: 0.01, decay: 0.2, sustain: 0.4, release: 1 }
    }).toDestination();

    // For drums, a simple Sampler. Note the options-object form: the second
    // positional argument of Tone.Sampler is an onload *function*, not a config.
    const drumSampler = new Tone.Sampler({
      urls: {
        C1: "https://tonejs.github.io/audio/drum-samples/breakbeat/kick.mp3",
        D1: "https://tonejs.github.io/audio/drum-samples/breakbeat/snare.mp3",
        E1: "https://tonejs.github.io/audio/drum-samples/breakbeat/hh.mp3",
        F1: "https://tonejs.github.io/audio/drum-samples/breakbeat/hho.mp3"
      },
      onload: () => console.log("Drum samples loaded")
    }).toDestination();

    // Map the 16 pads onto the 4 sample notes, repeated:
    const padMapping = [
      "C1", "D1", "E1", "F1",
      "C1", "D1", "E1", "F1",
      "C1", "D1", "E1", "F1",
      "C1", "D1", "E1", "F1"
    ];

    // ======================================================
    // 2) Build a 5-octave keyboard, C3..B7 (5 x 12 = 60 keys).
    //    A naive flat layout, for demonstration only.
    // ======================================================
    const noteArray = [];
    const startOctave = 3;
    const endOctave = 7;
    function buildNotes() {
      const noteNames = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"];
      for (let octave = startOctave; octave <= endOctave; octave++) {
        for (let n of noteNames) {
          noteArray.push(n + octave);
        }
      }
    }
    buildNotes();

    const keyboardDiv = document.getElementById('keyboard');
    noteArray.forEach((note) => {
      // Black key if the name contains a sharp.
      const isBlack = note.includes("#");
      const keyDiv = document.createElement('div');
      keyDiv.classList.add('key');
      if (isBlack) {
        keyDiv.classList.add('black');
      }
      keyDiv.dataset.note = note;
      keyboardDiv.appendChild(keyDiv);

      keyDiv.addEventListener('mousedown', () => {
        playNote(note, 100);
        keyDiv.classList.add('active');
      });
      keyDiv.addEventListener('mouseup', () => {
        releaseNote(note);
        keyDiv.classList.remove('active');
      });
      // Touch support for mobile.
      keyDiv.addEventListener('touchstart', (e) => {
        e.preventDefault();
        playNote(note, 100);
        keyDiv.classList.add('active');
      });
      keyDiv.addEventListener('touchend', (e) => {
        e.preventDefault();
        releaseNote(note);
        keyDiv.classList.remove('active');
      });
    });

    // ======================================================
    // 3) Build 16 drum pads
    // ======================================================
    const drumpadsDiv = document.getElementById('drumpads');
    for (let i = 0; i < 16; i++) {
      const pad = document.createElement('div');
      pad.classList.add('drumpad');
      pad.innerText = `Pad ${i + 1}`;
      pad.dataset.pad = i;
      drumpadsDiv.appendChild(pad);

      pad.addEventListener('mousedown', () => {
        triggerDrum(i, 100);
        pad.classList.add('active');
        setTimeout(() => pad.classList.remove('active'), 200);
      });
      // Touch support.
      pad.addEventListener('touchstart', (e) => {
        e.preventDefault();
        triggerDrum(i, 100);
        pad.classList.add('active');
        setTimeout(() => pad.classList.remove('active'), 200);
      });
    }

    // ======================================================
    // 4) Tone.js note on/off and drum triggers
    // ======================================================
    function playNote(noteName, velocity) {
      // velocity is 0..127 => scale to 0..1
      const vol = velocity / 127;
      // Resume the audio context (required by some browsers before playback).
      Tone.context.resume();
      synth.triggerAttack(noteName, Tone.now(), vol);
      // Notify Python.
      window.parent.postMessage({
        type: 'toneEvent',
        eventType: 'noteOn',
        note: midiNoteNumber(noteName),
        velocity: velocity
      }, '*');
    }

    function releaseNote(noteName) {
      synth.triggerRelease(noteName, Tone.now());
      // Notify Python.
      window.parent.postMessage({
        type: 'toneEvent',
        eventType: 'noteOff',
        note: midiNoteNumber(noteName),
        velocity: 0
      }, '*');
    }

    function triggerDrum(padIndex, velocity) {
      Tone.context.resume();
      const note = padMapping[padIndex % padMapping.length];
      const vol = velocity / 127;
      drumSampler.triggerAttack(note, Tone.now(), vol);
      // Notify Python.
      window.parent.postMessage({
        type: 'toneEvent',
        eventType: 'drum',
        padIndex: padIndex,
        velocity: velocity
      }, '*');
    }

    // ======================================================
    // 5) Convert a note name (e.g. "C#4") to its MIDI number.
    //    Standard formula: midi = 12 * (octave + 1) + noteIndex.
    // ======================================================
    function midiNoteNumber(noteStr) {
      const noteMap = {'C': 0, 'C#': 1, 'D': 2, 'D#': 3, 'E': 4, 'F': 5,
                       'F#': 6, 'G': 7, 'G#': 8, 'A': 9, 'A#': 10, 'B': 11};
      const match = noteStr.match(/^([A-G]#?)(\d)$/);
      if (!match) return 60;  // fall back to middle C
      const base = noteMap[match[1]];
      const oct = parseInt(match[2]);
      return 12 * (oct + 1) + base;
    }
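    // Worked example of the formula: "C#4" -> 12 * (4 + 1) + 1 = 61.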

    // ======================================================
    // 6) Listen for "hardwareMidi" messages from Python
    //    (the user played a note on a real keyboard) and route
    //    them to the synth so they sound in the browser.
    // ======================================================
    window.addEventListener('message', (e) => {
      if (e.data && e.data.type === 'hardwareMidi') {
        const evt = e.data.data;
        if (evt.eventType === 'noteOn') {
          // Map the MIDI number back to a note name for Tone.js.
          const noteName = midiToNoteName(evt.note);
          playNote(noteName, evt.velocity);
        } else if (evt.eventType === 'noteOff') {
          const noteName = midiToNoteName(evt.note);
          releaseNote(noteName);
        }
      }
    });

    function midiToNoteName(midiNum) {
      const noteNames = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"];
      const octave = Math.floor(midiNum / 12) - 1;
      const noteIndex = midiNum % 12;
      return noteNames[noteIndex] + octave;
    }
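    // And the inverse: 61 -> octave floor(61 / 12) - 1 = 4, index 1 -> "C#4".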

    // ======================================================
    // 7) (Optional) In-browser arpeggiator
    //    Keep the currently held notes in an array and drive them with a
    //    Tone.Pattern or Tone.Sequence. A minimal sketch follows.
    // ======================================================
  </script>
</body>
</html>
""".strip()


if __name__ == "__main__":
    main()
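

# Receiving the browser's 'toneEvent' messages in Python requires a custom
# bidirectional component; components.html() as used in main() is write-only.
# A minimal sketch, assuming the Tone.js page were packaged as a component in
# ./tone_component (the directory name and the payload shape are hypothetical,
# and this helper is not called anywhere in the app):
def make_bidirectional_component():
    return components.declare_component("tone_synth", path="./tone_component")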