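// AudioEffects applies the selected effect to an AudioBuffer by playing it
// through an OfflineAudioContext and handing the rendered result to a callback.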
import EchoEffect from './effects/echo-effect.js';
import RobotEffect from './effects/robot-effect.js';
import VolumeEffect from './effects/volume-effect.js';
import FadeEffect from './effects/fade-effect.js';
import MuteEffect from './effects/mute-effect.js';
import LowPassEffect from './effects/lowpass-effect.js';
import HighPassEffect from './effects/highpass-effect.js';
const effectTypes = {
ROBOT: 'robot',
REVERSE: 'reverse',
LOUDER: 'higher',
SOFTER: 'lower',
FASTER: 'faster',
SLOWER: 'slower',
ECHO: 'echo',
FADEIN: 'fade in',
FADEOUT: 'fade out',
MUTE: 'mute',
LOWPASS: 'low pass',
HIGHPASS: 'high pass'
};
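// Convert a pitch offset in cents to a playback-rate ratio (2^(cents / 1200)),
// rounded to six decimal places.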
const centsToFrequency = (cents) => {
return Math.round(1000000 * Math.pow(2, (cents / 100 / 12))) / 1000000;
};
class AudioEffects {
static get effectTypes () {
return effectTypes;
}
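// buffer is the source AudioBuffer, options carries the chosen preset plus any
// extra parameters (pitch, volume, sampleRate, sampleRateEnforced), and
// trimStart/trimEnd give the selection bounds as fractions (0-1) of the buffer.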
constructor (buffer, options, trimStart, trimEnd) {
this.trimStartSeconds = (trimStart * buffer.length) / buffer.sampleRate;
this.trimEndSeconds = (trimEnd * buffer.length) / buffer.sampleRate;
this.adjustedTrimStartSeconds = this.trimStartSeconds;
this.adjustedTrimEndSeconds = this.trimEndSeconds;
// Some effects will modify the playback rate and/or number of samples.
// Need to precompute those values to create the offline audio context.
const pitchRatio = Math.pow(2, 4 / 12); // A major third
let sampleCount = buffer.length;
const affectedSampleCount = Math.floor((this.trimEndSeconds - this.trimStartSeconds) *
buffer.sampleRate);
let adjustedAffectedSampleCount = affectedSampleCount;
const unaffectedSampleCount = sampleCount - affectedSampleCount;
// These affect the sampleCount
this.playbackRate = 1;
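// Echo extends the output by the echo tail; faster/slower and the pitch option
// change the playback rate, which shrinks or stretches the selected region.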
switch (options.preset) {
case effectTypes.ECHO:
sampleCount = Math.max(sampleCount,
Math.floor((this.trimEndSeconds + EchoEffect.TAIL_SECONDS) * buffer.sampleRate));
break;
case effectTypes.FASTER:
this.playbackRate = pitchRatio;
adjustedAffectedSampleCount = Math.floor(affectedSampleCount / this.playbackRate);
sampleCount = unaffectedSampleCount + adjustedAffectedSampleCount;
break;
case effectTypes.SLOWER:
this.playbackRate = 1 / pitchRatio;
adjustedAffectedSampleCount = Math.floor(affectedSampleCount / this.playbackRate);
sampleCount = unaffectedSampleCount + adjustedAffectedSampleCount;
break;
default:
if (Object.prototype.hasOwnProperty.call(options, 'pitch')) {
this.playbackRate = centsToFrequency(options.pitch);
adjustedAffectedSampleCount = Math.floor(affectedSampleCount / this.playbackRate);
sampleCount = unaffectedSampleCount + adjustedAffectedSampleCount;
}
break;
}
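// Recompute the trim points against the new output duration so the selection
// stays aligned after the affected region is stretched or shortened.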
const durationSeconds = sampleCount / buffer.sampleRate;
this.adjustedTrimEndSeconds = this.trimStartSeconds +
(adjustedAffectedSampleCount / buffer.sampleRate);
this.adjustedTrimStart = this.adjustedTrimStartSeconds / durationSeconds;
this.adjustedTrimEnd = this.adjustedTrimEndSeconds / durationSeconds;
let audioContextSampleRate = buffer.sampleRate;
let audioContextSampleCount = sampleCount;
if (Object.prototype.hasOwnProperty.call(options, 'sampleRateEnforced')) {
const newSampleRate = options.sampleRateEnforced;
audioContextSampleRate = newSampleRate;
audioContextSampleCount = Math.floor((sampleCount / buffer.sampleRate) * newSampleRate);
}
if (window.OfflineAudioContext) {
this.audioContext = new window.OfflineAudioContext(1, audioContextSampleCount, audioContextSampleRate);
} else {
// Need to use webkitOfflineAudioContext, which doesn't support all sample rates.
// Resample by adjusting sample count to make room and set offline context to desired sample rate.
const sampleScale = 44100 / audioContextSampleRate;
this.audioContext = new window.webkitOfflineAudioContext(1, sampleScale * audioContextSampleCount, 44100);
}
// Effects other than those handled below use the original buffer directly, since they do not modify it.
this.buffer = buffer;
// For the reverse effect we need to manually reverse the data into a new audio buffer
// to prevent overwriting the original, so that the undo stack works correctly.
// Doing buffer.reverse() would mutate the original data.
if (options.preset === effectTypes.REVERSE) {
const buffer = this.buffer;
const originalBufferData = buffer.getChannelData(0);
const newBuffer = this.audioContext.createBuffer(1, buffer.length, buffer.sampleRate);
const newBufferData = newBuffer.getChannelData(0);
const bufferLength = buffer.length;
const startSamples = Math.floor(this.trimStartSeconds * buffer.sampleRate);
const endSamples = Math.floor(this.trimEndSeconds * buffer.sampleRate);
let counter = 0;
for (let i = 0; i < bufferLength; i++) {
if (i >= startSamples && i < endSamples) {
newBufferData[i] = originalBufferData[endSamples - counter - 1];
counter++;
} else {
newBufferData[i] = originalBufferData[i];
}
}
this.buffer = newBuffer;
}
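// The sampleRate option resamples the selected region to options.sampleRate and
// back to the original rate with linear interpolation, leaving the rest of the
// buffer untouched.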
if (Object.prototype.hasOwnProperty.call(options, 'sampleRate')) {
// We can't overwrite the original buffer so we make a clone.
const buffer = this.buffer;
const originalBufferData = buffer.getChannelData(0);
const newBuffer = this.audioContext.createBuffer(1, buffer.length, buffer.sampleRate);
const newBufferData = newBuffer.getChannelData(0);
const bufferLength = buffer.length;
// The clone above keeps the original buffer's sample rate, so build a separate buffer resampled to the requested rate.
const sampleRateBuffer = this.makeSampleRateBuffer(buffer, durationSeconds, options.sampleRate);
const sampleRateBufferData = sampleRateBuffer.getChannelData(0);
const startSamples = Math.floor(this.trimStartSeconds * buffer.sampleRate);
const endSamples = Math.floor(this.trimEndSeconds * buffer.sampleRate);
for (let i = 0; i < bufferLength; i++) {
if (i >= startSamples && i < endSamples) {
// Map this index at the original sample rate to the corresponding position in the resampled buffer.
const sampleRateModifiedIndex = i * (sampleRateBuffer.sampleRate / buffer.sampleRate);
const lowerIndex = Math.floor(sampleRateModifiedIndex);
const upperIndex = Math.min(lowerIndex + 1, sampleRateBuffer.length - 1);
const interpolation = sampleRateModifiedIndex - lowerIndex;
const sample =
sampleRateBufferData[lowerIndex] * (1 - interpolation) +
sampleRateBufferData[upperIndex] * interpolation;
// Without the Number.isFinite guard this still runs, but sample can be NaN, and a NaN value breaks the waveform preview SVG.
newBufferData[i] = Number.isFinite(sample) ? sample : 0;
} else {
newBufferData[i] = originalBufferData[i];
}
}
this.buffer = newBuffer;
}
this.source = this.audioContext.createBufferSource();
this.source.buffer = this.buffer;
this.options = options;
}
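// Build a new mono buffer containing this buffer's audio resampled to
// newSampleRate, using linear interpolation between neighboring samples.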
makeSampleRateBuffer (buffer, durationSeconds, newSampleRate) {
const originalBufferData = buffer.getChannelData(0);
const newBufferLength = Math.floor(durationSeconds * newSampleRate);
const newBuffer = this.audioContext.createBuffer(1, newBufferLength, newSampleRate);
const newBufferData = newBuffer.getChannelData(0);
const bufferLength = buffer.length;
// Iterate over newBufferLength rather than bufferLength; using bufferLength would cut the result short when newSampleRate is higher than the buffer's sample rate.
for (let i = 0; i < newBufferLength; i++) {
const originalIndex = i * (buffer.sampleRate / newSampleRate);
const lowerIndex = Math.floor(originalIndex);
const upperIndex = Math.min(lowerIndex + 1, bufferLength - 1);
const interpolation = originalIndex - lowerIndex;
const sample =
originalBufferData[lowerIndex] * (1 - interpolation) +
originalBufferData[upperIndex] * interpolation;
newBufferData[i] = sample;
}
return newBuffer;
}
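// Wire up the node graph for the selected effect, render it offline, and call
// done(renderedBuffer, adjustedTrimStart, adjustedTrimEnd) when rendering finishes.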
process (done) {
// Some effects need to use more nodes and must expose an input and output
let input;
let output;
switch (this.options.preset) {
case effectTypes.FASTER:
case effectTypes.SLOWER:
this.source.playbackRate.setValueAtTime(this.playbackRate, this.adjustedTrimStartSeconds);
this.source.playbackRate.setValueAtTime(1.0, this.adjustedTrimEndSeconds);
break;
case effectTypes.LOUDER:
({input, output} = new VolumeEffect(this.audioContext, 1.25,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
case effectTypes.SOFTER:
({input, output} = new VolumeEffect(this.audioContext, 0.75,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
case effectTypes.ECHO:
({input, output} = new EchoEffect(this.audioContext,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
case effectTypes.ROBOT:
({input, output} = new RobotEffect(this.audioContext,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
case effectTypes.LOWPASS:
({input, output} = new LowPassEffect(this.audioContext,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
case effectTypes.HIGHPASS:
({input, output} = new HighPassEffect(this.audioContext,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
case effectTypes.FADEIN:
({input, output} = new FadeEffect(this.audioContext, true,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
case effectTypes.FADEOUT:
({input, output} = new FadeEffect(this.audioContext, false,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
case effectTypes.MUTE:
({input, output} = new MuteEffect(this.audioContext,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
break;
default:
if (Object.prototype.hasOwnProperty.call(this.options, 'pitch')) {
this.source.playbackRate.setValueAtTime(this.playbackRate, this.adjustedTrimStartSeconds);
this.source.playbackRate.setValueAtTime(1.0, this.adjustedTrimEndSeconds);
}
if (Object.prototype.hasOwnProperty.call(this.options, 'volume')) {
({input, output} = new VolumeEffect(this.audioContext, this.options.volume,
this.adjustedTrimStartSeconds, this.adjustedTrimEndSeconds));
}
break;
}
if (input && output) {
this.source.connect(input);
output.connect(this.audioContext.destination);
} else {
// No effect nodes are needed, so wire the source directly to the destination
this.source.connect(this.audioContext.destination);
}
this.source.start();
this.audioContext.startRendering();
this.audioContext.oncomplete = ({renderedBuffer}) => {
done(renderedBuffer, this.adjustedTrimStart, this.adjustedTrimEnd);
};
}
}
export default AudioEffects;