/**
* LittleJS ZzFXM Plugin
*/
'use strict';
/**
* Music Object - Stores a zzfx music track for later use
*
* <a href=https://keithclark.github.io/ZzFXM/>Create music with the ZzFXM tracker.</a>
* @example
* // create some music
* const music_example = new Music(
* [
* [ // instruments
* [,0,400] // simple note
* ],
* [ // patterns
* [ // pattern 1
* [ // channel 0
* 0, -1, // instrument 0, left speaker
* 1, 0, 9, 1 // channel notes
* ],
* [ // channel 1
* 0, 1, // instrument 0, right speaker
* 0, 12, 17, -1 // channel notes
* ]
* ],
* ],
* [0, 0, 0, 0], // sequence, play pattern 0 four times
* 90 // BPM
* ]);
*
* // play the music
* music_example.play();
*/
class ZzFXMusic extends Sound
{
    /** Create a music object and cache the zzfx music samples for later use
     *  @param {[Array, Array, Array, number]} zzfxMusic - Array of zzfx music parameters */
    constructor(zzfxMusic)
    {
        // init base Sound with no zzfx params, samples are generated below
        super(undefined);

        // skip rendering when audio is disabled or running headless
        if (soundEnable && !headlessMode)
        {
            // render the whole song up front into stereo sample buffers
            this.randomness = 0;
            this.sampleChannels = zzfxM(...zzfxMusic);
            this.sampleRate = audioDefaultSampleRate;
        }
    }

    /** Play the music that loops by default
     *  @param {number} [volume] - Volume to play the music at
     *  @param {boolean} [loop] - Should the music loop?
     *  @return {AudioBufferSourceNode} - The audio source node */
    playMusic(volume=1, loop=true)
    {
        // undefined position keeps the music unpositioned (no spatial panning),
        // pitch is fixed at 1 and randomness at 0
        return super.play(undefined, volume, 1, 0, loop);
    }
}
///////////////////////////////////////////////////////////////////////////////
// ZzFX Music Renderer v2.0.3 by Keith Clark and Frank Force
/** Generate samples for a ZzFXM song with given parameters
 * @param {Array} instruments - Array of ZzFX sound parameters
 * @param {Array} patterns - Array of pattern data, each pattern is an array of
 *   channels laid out as [instrument, panning, ...notes]
 * @param {Array} sequence - Array of pattern indexes to play in order
 * @param {number} [BPM] - Playback speed of the song in BPM
 * @return {Array} - Left and right channel sample data */
function zzfxM(instruments, patterns, sequence, BPM = 125)
{
    let i, j, k;
    let instrumentParameters;   // mutable copy of the current instrument's zzfx params
    let note;                   // current pattern entry: integer part = semitone, fraction = attenuation
    let sample;                 // current mono sample value before panning is applied
    let patternChannel;         // note data for the current channel of the current pattern
    let notFirstBeat;           // truthy once at least one beat of this channel has been processed
    let stop;                   // whether the current note should fade out at the end of this beat
    let instrument;             // instrument index used by the current channel
    let attenuation;            // volume reduction for the current note, 0 (full) to 1 (silent)
    let outSampleOffset;        // write position in the output buffers for this channel
    let isSequenceEnd;
    let sampleOffset = 0;       // read position within the cached note's sample buffer
    let nextSampleOffset;
    let sampleBuffer = [];      // rendered samples of the note currently sounding
    let leftChannelBuffer = [];
    let rightChannelBuffer = [];
    let channelIndex = 0;
    let panning = 0;            // stereo position, -1 (hard left) to 1 (hard right)
    let hasMore = 1;            // set while at least one pattern defines the current channel
    let sampleCache = {};       // rendered notes keyed by [instrument, note] (array key coerces to string)
    let beatLength = audioDefaultSampleRate / BPM * 60 >> 2; // samples per beat: sampleRate*60/BPM, divided by 4
    // render each channel in turn, mixing into the shared stereo buffers,
    // until a full pass finds no pattern data for the channel index
    for (; hasMore; channelIndex++) {
        // reset current values; starts with a single silent sample and clears
        // hasMore / notFirstBeat / outSampleOffset via the chained assignment
        sampleBuffer = [hasMore = notFirstBeat = outSampleOffset = 0];
        // for each pattern in sequence
        sequence.forEach((patternIndex, sequenceIndex) => {
            // get pattern for current channel, use empty 1 note pattern if none found
            patternChannel = patterns[patternIndex][channelIndex] || [0, 0, 0];
            // keep the outer channel loop going if any pattern defines this channel
            hasMore |= patterns[patternIndex][channelIndex]&&1;
            // get next offset, use the length of first channel
            // (length - 2 skips the instrument and panning header entries)
            nextSampleOffset = outSampleOffset + (patterns[patternIndex][0].length - 2 - (notFirstBeat?0:1)) * beatLength;
            // for each beat in pattern, plus one extra if end of sequence
            // (notes start at index 2; notFirstBeat becomes truthy after the first beat)
            isSequenceEnd = sequenceIndex === sequence.length - 1;
            for (i = 2, k = outSampleOffset; i < patternChannel.length + isSequenceEnd; notFirstBeat = ++i) {
                // <channel-note>
                note = patternChannel[i];
                // stop if end, different instrument or new note
                stop = i === patternChannel.length + isSequenceEnd - 1 && isSequenceEnd ||
                    instrument !== (patternChannel[0] || 0) || note | 0;
                // fill buffer with samples for previous beat, most cpu intensive part
                for (j = 0; j < beatLength && notFirstBeat;
                    // fade off attenuation at end of beat if stopping note, prevents clicking
                    j++ > beatLength - 99 && stop && attenuation < 1? attenuation += 1 / 99 : 0
                ) {
                    // copy sample to stereo buffers with panning:
                    // left gain is (1 - panning), right gain is (1 + panning)
                    sample = (1 - attenuation) * sampleBuffer[sampleOffset++] / 2 || 0;
                    leftChannelBuffer[k] = (leftChannelBuffer[k] || 0) - sample * panning + sample;
                    rightChannelBuffer[k] = (rightChannelBuffer[k++] || 0) + sample * panning + sample;
                }
                // set up for next note
                if (note) {
                    // set attenuation from the fractional part of the note value
                    attenuation = note % 1;
                    panning = patternChannel[1] || 0;
                    if (note |= 0) {
                        // get cached sample; assignments inside the key expression also
                        // update the current instrument and rewind the sample read offset
                        sampleBuffer = sampleCache[
                            [
                                instrument = patternChannel[sampleOffset = 0] || 0,
                                note
                            ]
                        ] = sampleCache[[instrument, note]] || (
                            // add sample to cache, pitch-shifting the instrument's
                            // base frequency (default 220 Hz) by note semitones
                            instrumentParameters = [...instruments[instrument]],
                            instrumentParameters[2] = (instrumentParameters[2] || 220) * 2**(note / 12 - 1),
                            // allow negative values to stop notes (empty buffer = silence)
                            note > 0 ? zzfxG(...instrumentParameters) : []
                        );
                    }
                }
            }
            // update the sample offset
            outSampleOffset = nextSampleOffset;
        });
    }
    return [leftChannelBuffer, rightChannelBuffer];
}