All definitions come from the Mozilla Developer Network (MDN) pages giving details about the Web Audio API.
Let's study the most useful filter and effect nodes: gain, stereo panner, biquad filter, convolver (reverb), and dynamics compressor.
The gain node is useful for setting the volume; see the Gain node's documentation.
Definition: "The GainNode interface represents a change in volume. It is an AudioNode audio-processing module that causes a given gain to be applied to the input data before its propagation to the output. A GainNode always has exactly one input and one output, both with the same number of channels."
Example below—
HTML:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>WebAudio example of gain node</title>
</head>
<body>
<audio src="https://mainline.i3s.unice.fr/mooc/drums.mp3" id="gainExample" controls loop crossorigin="anonymous"></audio>
<br>
<label for="gainSlider">Gain</label>
<input type="range" min="0" max="1" step="0.01" value="1" id="gainSlider" />
</body>
</html>
JavaScript:
let audioContext;
let gainExample, gainSlider, gainNode;
window.onload = () => {
// get the AudioContext
audioContext = new AudioContext();
// the audio element
gainExample = document.querySelector('#gainExample');
gainSlider = document.querySelector('#gainSlider');
// fix for autoplay policy
gainExample.addEventListener('play',() => audioContext.resume());
buildAudioGraph();
// input listener on the gain slider
gainSlider.oninput = (evt) => {
gainNode.gain.value = parseFloat(evt.target.value);
};
};
function buildAudioGraph() {
// create source and gain node
let gainMediaElementSource = audioContext.createMediaElementSource(gainExample);
gainNode = audioContext.createGain();
// connect nodes together
gainMediaElementSource.connect(gainNode);
gainNode.connect(audioContext.destination);
}
The gain property (in the above code) corresponds to the multiplication we apply to the input signal volume. A value of 1 will keep the volume unchanged. A value < 1 will lower the volume (0 will mute the signal), and a value > 1 will increase the global volume, with a risk of clipping. With gain values > 1, we usually add a compressor node to the signal chain to prevent clipping. You will see an example of this when we discuss the compressor node.
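Note that setting gain.value directly makes the volume jump immediately, which can produce audible clicks. Here is a minimal sketch (reusing the gainNode from the example above; the helper name is ours) of how the standard AudioParam automation methods can be used to change the volume smoothly:
// Hypothetical helper: change the gain smoothly to avoid clicks.
// setValueAtTime and linearRampToValueAtTime are standard AudioParam methods.
function setGainSmoothly(targetValue, durationInSeconds) {
  const now = audioContext.currentTime;
  // pin the current value, then ramp linearly to the target
  gainNode.gain.setValueAtTime(gainNode.gain.value, now);
  gainNode.gain.linearRampToValueAtTime(targetValue, now + durationInSeconds);
}
// usage in the slider listener, for example:
// gainSlider.oninput = (evt) => setGainSmoothly(parseFloat(evt.target.value), 0.05);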
See the Stereo Panner node's documentation.
Definition: "The StereoPannerNode interface of the Web Audio API represents a simple stereo panner node that can be used to pan an audio stream left or right. The pan property takes a value between -1 (full left pan) and 1 (full right pan)."
Example below—
HTML:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>WebAudio example of stereo panner node</title>
</head>
<body>
<audio src="https://mainline.i3s.unice.fr/mooc/drums.mp3" id="pannerPlayer" controls loop crossorigin="anonymous"></audio>
<br>
<label for="pannerSlider">Balance</label>
<input type="range" min="-1" max="1" step="0.1" value="0" id="pannerSlider" />
</body>
</html>
JavaScript:
let audioContext;
let playerPanner, pannerSlider, pannerNode;
window.onload = () => {
// get the AudioContext
audioContext = new AudioContext();
// the audio element
playerPanner = document.querySelector('#pannerPlayer');
playerPanner.onplay = (e) => {audioContext.resume();}
pannerSlider = document.querySelector('#pannerSlider');
buildAudioGraphPanner();
// input listener on the panner slider
pannerSlider.oninput = (evt) => {
pannerNode.pan.value = evt.target.value;
};
};
function buildAudioGraphPanner() {
// create source and stereo panner node
let source = audioContext.createMediaElementSource(playerPanner);
pannerNode = audioContext.createStereoPanner();
// connect nodes together
source.connect(pannerNode);
pannerNode.connect(audioContext.destination);
}
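Since pan is an AudioParam, it can also be automated over time. Here is a minimal sketch (the autoPan function is hypothetical, reusing the pannerNode from the example) of a simple auto-pan effect that sweeps the sound from one speaker to the other and back:
// Hypothetical example: sweep the pan from full left to full right and back.
function autoPan() {
  const now = audioContext.currentTime;
  pannerNode.pan.setValueAtTime(-1, now);              // full left
  pannerNode.pan.linearRampToValueAtTime(1, now + 2);  // reach full right after 2s
  pannerNode.pan.linearRampToValueAtTime(-1, now + 4); // back to full left after 4s
}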
Definition: "The BiquadFilterNode interface represents a simple low-order filter, and is created using the AudioContext.createBiquadFilter() method. It is an AudioNode that can represent different kinds of filters, tone control devices, and graphic equalizers. A BiquadFilterNode always has exactly one input and one output."
See also the Biquad Filter node's documentation.
HTML:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>WebAudio example of biquad filter node</title>
</head>
<body>
<audio src="https://mainline.i3s.unice.fr/mooc/guitarRiff1.mp3" id="biquadExample" controls loop crossorigin="anonymous"></audio>
<br>
<label for="biquadFilterFrequencySlider">Frequency</label>
<input type="range" min="0" max="22050" step="1" value="350" id="biquadFilterFrequencySlider" />
<label for="biquadFilterDetuneSlider">Detune</label>
<input type="range" min="0" max="100" step="1" value="0" id="biquadFilterDetuneSlider" />
<label for="biquadFilterQSlider">Q</label>
<input type="range" min="0.0001" max="1000" step="0.01" value="1" id="biquadFilterQSlider" />
<label for="biquadFilterTypeSelector">Type</label>
<select id="biquadFilterTypeSelector">
<option value="lowpass" selected>lowpass</option>
<option value="highpass">highpass</option>
<option value="bandpass">bandpass</option>
<option value="lowshelf">lowshelf</option>
<option value="highshelf">highshelf</option>
<option value="peaking">peaking</option>
<option value="notch">notch</option>
<option value="allpass">allpass</option>
</select>
</body>
</html>
JavaScript:
let audioContext = new AudioContext();
/* BiquadFilterNode */
let biquadExample = document.querySelector('#biquadExample');
biquadExample.onplay = (e) => {audioContext.resume();}
let biquadFilterFrequencySlider = document.querySelector('#biquadFilterFrequencySlider');
let biquadFilterDetuneSlider = document.querySelector('#biquadFilterDetuneSlider');
let biquadFilterQSlider = document.querySelector('#biquadFilterQSlider');
let biquadFilterTypeSelector = document.querySelector('#biquadFilterTypeSelector');
let biquadExampleMediaElementSource = audioContext.createMediaElementSource(biquadExample);
let filterNode = audioContext.createBiquadFilter();
biquadExampleMediaElementSource.connect(filterNode);
filterNode.connect(audioContext.destination);
biquadFilterFrequencySlider.oninput = (evt) => {
filterNode.frequency.value = parseFloat(evt.target.value);
};
biquadFilterDetuneSlider.oninput = (evt) => {
filterNode.detune.value = parseFloat(evt.target.value);
};
biquadFilterQSlider.oninput = (evt) => {
filterNode.Q.value = parseFloat(evt.target.value);
};
biquadFilterTypeSelector.onchange = (evt) => {
filterNode.type = evt.target.value;
};
The most useful slider is the frequency slider (which changes the frequency property of the node). The meaning of the different properties (frequency, detune and Q) differs depending on the type of filter you use (click on the dropdown menu to see the different types available). Read the BiquadFilterNode documentation for details on how the frequency, detune and Q properties are used with each of these filter types.
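Because human hearing is roughly logarithmic in frequency, a linear frequency slider like the one above feels unbalanced: most of its travel is spent on high frequencies. Here is a minimal sketch, assuming a slider going from 0 to 1, of how one might map slider values to an exponential frequency scale instead (the frequency response demo below uses the same idea):
// Map a linear 0..1 slider value to an exponential frequency scale
// spanning the octaves between 10 Hz and the Nyquist frequency.
function sliderToFrequency(sliderValue) {
  const nyquist = audioContext.sampleRate / 2;        // e.g. 22050 Hz at 44.1 kHz
  const noctaves = Math.log(nyquist / 10) / Math.LN2; // number of octaves above 10 Hz
  return nyquist * Math.pow(2, noctaves * (sliderValue - 1));
}
// usage (hypothetical): filterNode.frequency.value = sliderToFrequency(0.68);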
Here is a nice graphic application that shows the frequency response of the various filters. You can choose the type of filter and play with the different property values using sliders.
HTML:
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<link href='http://fonts.googleapis.com/css?family=Open+Sans:400,600' rel='stylesheet' type='text/css'>
<style>
#slider { margin: 10px; }
</style>
<title>Frequency Response | Web Audio API</title>
</head>
<body>
<div id="container">
<div id="wrapper">
<div id="content">
<div class="post">
<h2>Frequency Response</h2>
<p>A sample showing the frequency response graphs of various kinds of <code>BiquadFilterNode</code>s.</p>
<!-- Slider stuff -->
<script src="events.js"></script>
<script src="/static/js/shared.js"></script>
<script src="frequency-response-sample.js"></script>
<div id="info"></div>
<canvas id="canvasID" width="500" height="250" style="float: left;"></canvas>
<!-- Sliders and other controls will be added here -->
<div style="display: inline-block; margin-left: 10px;" id="controls">
<select onchange="changeFilterType(this.value);">
<option value="lowpass">LowPass</option>
<option value="highpass">HighPass</option>
<option value="bandpass">BandPass</option>
<option value="lowshelf">LowShelf</option>
<option value="highshelf">HighShelf</option>
<option value="peaking">Peaking</option>
<option value="notch">Notch</option>
<option value="allpass">AllPass</option>
</select>
<div>
<input id="frequencySlider" type="range" min="0" max="1" step="0.01" value="0" style="height: 20px; width: 200px;">
<span id="frequency-value" style="position: relative; top: -5px;">frequency = 1277.46 Hz</span>
</div>
<br>
<div>
<input id="QSlider" type="range" min="0" max="20" step="0.01" value="0" style="height: 20px; width: 200px;">
<span id="Q-value" style="position: relative; top: -5px;">Q = 8.59 dB</span>
</div>
<br>
<div>
<input id="gainSlider" type="range" min="0" max="5" step="0.01" value="0" style="height: 20px; width: 200px;">
<span id="gain-value" style="position: relative; top: -5px;">gain = 3.12</span>
</div>
</div>
</div>
</div>
</div>
</div>
</body>
</html>
CSS:
body {
background: black;
font-family: 'Open Sans', sans-serif;
color: #ccc;
}
#container {
margin: 0 auto;
width: 640px;
background: #111;
padding: 20px;
border: 1px #222 solid;
}
#header h1 { margin: 0; }
#nav ul { margin: 0; }
#nav li {
font-weight: bold;
float: right;
list-style: none;
margin-left: 10px;
}
#header a, #nav a { color: rgb(28, 161, 202); text-decoration: none; }
#header a:hover, #nav a:hover { color: rgb(157, 204, 255); text-decoration: underline; }
article a { color: #FEFEF2; text-decoration: none; }
article a:hover { color: white; text-decoration: underline; }
a { color: #FEF0C9; text-decoration: none; }
a:hover { color: #FEF6E4; text-decoration: underline; }
article {
font-weight: bold;
margin: 20px;
padding: 20px;
text-align: center;
cursor: pointer;
color: black;
}
#content article:nth-child(6n+1) { background: #FA6B67; }
#content article:nth-child(6n+2) { background: #70CA5D; }
#content article:nth-child(6n+3) { background: #3CC9E4; }
#content article:nth-child(6n+4) { background: #EC945E; }
#content article:nth-child(6n+5) { background: #7189DE; }
#content article:nth-child(6n+6) { background: #E798DC; }
.sample .name {
font-size: 22px;
padding-bottom: 5px;
color: #fee;
}
.sample .info {
color: #B0F0F8;
}
input[type='range'] {
-webkit-appearance: none;
background-color: gray;
height: 20px;
border-radius: 15px;
padding: 0 3px;
}
input[type='range']::-webkit-slider-thumb {
vertical-align: middle;
-webkit-appearance: none;
background-color: #444;
width: 16px;
height: 16px;
border-radius: 16px;
}
input[type="checkbox"] {
display:none;
}
input[type="checkbox"] + label span {
display:inline-block;
width:19px;
height:19px;
margin:-1px 4px 0 0;
vertical-align:middle;
background:url(/static/images/check_radio_sheet.png) left top no-repeat;
cursor:pointer;
}
input[type="checkbox"]:checked + label span {
background:url(/static/images/check_radio_sheet.png) -19px top no-repeat;
}
JavaScript:
// ------ shared.js
// Start off by initializing a new context.
let context = new AudioContext();
let frequencyRenderer;
//---------- events.js
function configureSlider(name, value, min, max, handler) {
let divName = name + "Slider";
let slider = document.getElementById(divName);
slider.min = min;
slider.max = max;
slider.value = value;
slider.oninput = function() { handler(0, this); };
}
// Events
// init() once the page has finished loading.
window.onload = init;
let filter;
let filters = [];
let frequency = 2000;
let resonance = 5;
let gain = 2;
let canvas;
let canvasContext;
function frequencyHandler(event, ui) {
let value = ui.value;
let nyquist = context.sampleRate * 0.5;
let noctaves = Math.log(nyquist / 10.0) / Math.LN2;
let v2 = Math.pow(2.0, noctaves * (value - 1.0));
let cutoff = v2*nyquist;
let info = document.getElementById("frequency-value");
info.innerHTML = "frequency = " + (Math.floor(cutoff*100)/100) + " Hz";
filter.frequency.value = cutoff;
frequencyRenderer.draw(filters);
}
function resonanceHandler(event, ui) {
let value = ui.value;
let info = document.getElementById("Q-value");
info.innerHTML = "Q = " + (Math.floor(value*100)/100) + " dB";
filter.Q.value = value; // !! Q value not same as resonance...
frequencyRenderer.draw(filters);
}
function gainHandler(event, ui) {
let value = ui.value;
let info = document.getElementById("gain-value");
info.innerHTML = "gain = " + (Math.floor(value*100)/100);
filter.gain.value = value;
frequencyRenderer.draw(filters);
}
function initAudio() {
filter = context.createBiquadFilter();
filter.Q.value = 5;
filter.frequency.value = 2000;
filter.gain.value = 2;
filter.connect(context.destination);
filters.push(filter);
// * filters from multiband EQ
/*
[60, 170, 350, 1000, 3500, 10000].forEach(function(freq, i) {
var eq = context.createBiquadFilter();
eq.frequency.value = freq;
eq.type = "peaking";
eq.gain.value = -30+Math.random()*60;
filters.push(eq);
});
*/
}
function init() {
initAudio();
canvas = document.getElementById('canvasID');
canvasContext = canvas.getContext('2d');
configureSlider("frequency", 0.68, 0, 1, frequencyHandler);
configureSlider("Q", resonance, 0, 30, resonanceHandler);
configureSlider("gain", gain, -30, 30, gainHandler);
frequencyRenderer = new FilterFrequencyResponseRenderer(canvas, context);
frequencyRenderer.draw(filters);
}
function changeFilterType( value ) {
filter.type = value;
frequencyRenderer.draw(filters);
}
// ----------- FILTER RESPONSE RENDERER
function FilterFrequencyResponseRenderer(canvas, audioCxt) {
let c = canvas;
let ctx = c.getContext('2d');
let width = c.width;
let height = c.height;
let audioContext = audioCxt;
let curveColor = "rgb(224,27,106)";
let playheadColor = "rgb(80, 100, 80)";
let gridColor = "rgb(100,100,100)";
let textColor = "rgb(81,127,207)";
let dbScale = 60;
let pixelsPerDb;
let dbToY = function(db) {
let y = (0.5 * height) - pixelsPerDb * db;
return y;
};
let draw = function(filters) {
ctx.save();
ctx.clearRect(0, 0, width, height);
let frequencyHz = new Float32Array(width);
let magResponse = new Float32Array(width);
let phaseResponse = new Float32Array(width);
let nyquist = 0.5 * audioContext.sampleRate;
let noctaves = 11;
filters.forEach(function(filt) {
ctx.strokeStyle = curveColor;
ctx.lineWidth = 3;
ctx.beginPath();
ctx.moveTo(0, 0);
pixelsPerDb = (0.5 * height) / dbScale;
// First get response.
for (let i = 0; i < width; ++i) {
let f = i / width;
// Convert to log frequency scale (octaves).
f = nyquist * Math.pow(2.0, noctaves * (f - 1.0));
frequencyHz[i] = f;
}
filt.getFrequencyResponse(frequencyHz, magResponse, phaseResponse);
/*
var magResponseOfFilter = new Float32Array(width);
filt.getFrequencyResponse(frequencyHz, magResponseOfFilter, phaseResponse);
// sum it to magResponse
for(var l = 0; l < width; l++) {
magResponse[l] = magResponseOfFilter[l];
}
*/
for (let i = 0; i < width; ++i) {
let response = magResponse[i];
// convert the linear magnitude to decibels
let dbResponse = 20.0 * Math.log(response) / Math.LN10;
let x = i;
let y = dbToY(dbResponse);
if ( i === 0 )
ctx.moveTo(x,y);
else
ctx.lineTo(x, y);
}
ctx.stroke();
ctx.beginPath();
ctx.lineWidth = 1;
ctx.strokeStyle = gridColor;
// Draw frequency scale.
for (let octave = 0; octave <= noctaves; octave++) {
let x = octave * width / noctaves;
ctx.strokeStyle = gridColor;
ctx.moveTo(x, 30);
ctx.lineTo(x, height);
ctx.stroke();
let f = nyquist * Math.pow(2.0, octave - noctaves);
let value = f.toFixed(0);
let unit = 'Hz';
if (f > 1000) {
unit = 'kHz';
value = (f/1000).toFixed(1);
}
ctx.textAlign = "center";
ctx.strokeStyle = textColor;
ctx.strokeText(value + unit, x, 20);
}
// Draw 0dB line.
ctx.beginPath();
ctx.moveTo(0, 0.5 * height);
ctx.lineTo(width, 0.5 * height);
ctx.stroke();
// Draw decibel scale.
for (let db = -dbScale; db < dbScale - 10; db += 10) {
let y = dbToY(db);
ctx.strokeStyle = textColor;
ctx.strokeText(db.toFixed(0) + "dB", width - 40, y);
ctx.strokeStyle = gridColor;
ctx.beginPath();
ctx.moveTo(0, y);
ctx.lineTo(width, y);
ctx.stroke();
}
});
ctx.restore();
};
return {
draw: draw
};
}
Multiple filters are often used together. We will build a multiband equalizer in an upcoming lesson, using six filters with type="peaking", as sketched below.
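Here is a minimal sketch of what such a chain could look like (the band frequencies are the ones from the commented-out part of the demo code above; source stands for any audio source node):
// Chain six peaking filters in series: source -> eq0 -> eq1 -> ... -> destination
const eqFilters = [60, 170, 350, 1000, 3500, 10000].map((freq) => {
  const eq = audioContext.createBiquadFilter();
  eq.type = "peaking";
  eq.frequency.value = freq;
  eq.gain.value = 0; // 0 dB = no change; each band would get its own slider
  return eq;
});
// connect them in series
eqFilters.reduce((previous, current) => {
  previous.connect(current);
  return current;
}, source).connect(audioContext.destination);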
Definition: "The ConvolverNode interface is an AudioNode that performs a Linear Convolution on a given AudioBuffer, often used to achieve a reverb effect. A ConvolverNode always has exactly one input and one output."
Read the Convolver node's documentation.
HTML:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>WebAudio example of convolver node</title>
</head>
<body>
<audio src="https://mainline.i3s.unice.fr/mooc/guitarRiff1.mp3" id="convolverPlayer" controls loop crossorigin="anonymous"></audio>
<br>
<label for="convolverSlider">Reverb (Dry/Wet)</label>
<input type="range" min="0" max="1" step="0.1" value="0" id="convolverSlider" />
</body>
</html>
JavaScript:
let audioContext;
let playerConvolver, convolverSlider, convolverNode, convolverGain, directGain;
let impulseURL = "https://mainline.i3s.unice.fr/mooc/Scala-Milan-Opera-Hall.wav";
let decodedImpulse;
window.onload = () => {
// get the AudioContext
audioContext = new AudioContext();
// the audio element
playerConvolver = document.querySelector('#convolverPlayer');
playerConvolver.onplay = (e) =>{audioContext.resume();}
convolverSlider = document.querySelector('#convolverSlider');
loadImpulse(impulseURL, function() {
// we get here only when the impulse has finished
// loading
buildAudioGraphConvolver();
});
// input listener on the gain slider
convolverSlider.oninput = (evt) => {
// We set the gain at the output of the convolver (wet signal route) with the
// slider value, and we set the gain between the source and dest
// (dry signal route) to 1 - wet
convolverGain.gain.value = parseFloat(evt.target.value);
directGain.gain.value = 1 - convolverGain.gain.value;
};
};
function buildAudioGraphConvolver() {
// create source, convolver and gain nodes
let source = audioContext.createMediaElementSource(playerConvolver);
convolverNode = audioContext.createConvolver();
convolverNode.buffer = decodedImpulse;
convolverGain = audioContext.createGain();
convolverGain.gain.value = 0;
directGain = audioContext.createGain();
directGain.gain.value = 1;
// direct/dry route source -> directGain -> destination
source.connect(directGain);
directGain.connect(audioContext.destination);
// wet route with convolver: source -> convolver
// -> convolverGain -> destination
source.connect(convolverNode);
convolverNode.connect(convolverGain);
convolverGain.connect(audioContext.destination);
}
function loadImpulse(url, callback) {
let ajaxRequest = new XMLHttpRequest();
ajaxRequest.open('GET', url, true);
ajaxRequest.responseType = 'arraybuffer';
ajaxRequest.onload = () => {
let impulseData = ajaxRequest.response;
audioContext.decodeAudioData(impulseData, function(buffer) {
decodedImpulse = buffer;
callback();
});
};
ajaxRequest.onerror = (e) => {
console.error("Error when loading the impulse file", e);
};
ajaxRequest.send();
}
From Wikipedia: a convolution is a mathematical process which can be applied to an audio signal to achieve many interesting high-quality linear effects. Very often, the effect is used to simulate an acoustic space such as a concert hall, cathedral, or outdoor amphitheater. It can also be used for complex filter effects, like a muffled sound coming from inside a closet, sound underwater, sound coming through a telephone, or playing through a vintage speaker cabinet. This technique is very commonly used in major motion picture and music production and is considered to be extremely versatile and of high quality.
Each unique effect is defined by an impulse response. An impulse response can be represented as an audio file and can be recorded from a real acoustic space such as a cave, or can be synthetically generated through a wide variety of techniques. We can find many high quality impulses on the Web. The impulse used in the example is the one recorded at the opera: La Scala Opera of Milan, in Italy. It's a .wav file.
Try this demo to see the difference between different impulse files!
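For instance, a very rough synthetic impulse response can be built from exponentially decaying white noise; here is a minimal sketch (standard API only; the function name and values are ours):
// Build a crude synthetic impulse response: a stereo buffer
// filled with exponentially decaying white noise.
function makeSyntheticImpulse(durationInSeconds, decay) {
  const rate = audioContext.sampleRate;
  const length = rate * durationInSeconds;
  const impulse = audioContext.createBuffer(2, length, rate);
  for (let channel = 0; channel < 2; channel++) {
    const data = impulse.getChannelData(channel);
    for (let i = 0; i < length; i++) {
      data[i] = (Math.random() * 2 - 1) * Math.pow(1 - i / length, decay);
    }
  }
  return impulse;
}
// usage (hypothetical): convolverNode.buffer = makeSyntheticImpulse(2, 3);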
So before building the audio graph, we need to download the impulse. For this, we use an Ajax request (this will be detailed during Module 3), but for the moment, just take this function as it is... The Web Audio API requires that impulse files should be decoded in memory before use. Accordingly, once the requested file has been downloaded, we call the decodeAudioData method. Once the impulse is decoded, we can build the graph. So typical use is as follows:
let impulseURL = "https://mainline.i3s.unice.fr/mooc/Scala-Milan-Opera-Hall.wav";
let decodedImpulse;
...
loadImpulse(impulseURL, function() {
// we only get here once the impulse has finished
// loading and is decoded
buildAudioGraphConvolver();
});
...
function loadImpulse(url, callback) {
let ajaxRequest = new XMLHttpRequest();
ajaxRequest.open('GET', url, true);
ajaxRequest.responseType = 'arraybuffer'; // for binary transfer
ajaxRequest.onload = () => {
// The impulse has been loaded
let impulseData = ajaxRequest.response;
// let's decode it
audioContext.decodeAudioData(impulseData, (buffer) => {
// The impulse has been decoded
decodedImpulse = buffer;
// Let's call the callback function, we're done!
callback();
});
};
ajaxRequest.onerror = (e) => {
console.error("Error when loading the impulse file", e);
};
ajaxRequest.send();
}
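As a side note, on recent browsers the same load-and-decode step can be written more concisely with the Fetch API and promises (decodeAudioData returns a promise in modern implementations); a sketch equivalent to loadImpulse above:
// Same as loadImpulse, but with fetch and promises (sketch)
function loadImpulseWithFetch(url) {
  return fetch(url)
    .then((response) => response.arrayBuffer())
    .then((data) => audioContext.decodeAudioData(data))
    .then((buffer) => { decodedImpulse = buffer; });
}
// usage: loadImpulseWithFetch(impulseURL).then(buildAudioGraphConvolver);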
Now, let's consider the function which builds the graph. In order to set the quantity of reverb we would like to apply, we need two separate routes for the signal:
One "dry" route where we directly connect the audio source to the destination,
One "wet" route where we connect the audio source to the convolver node (that will add a reverb effect), then to the destination,
At the end of both routes, before the destination, we add a gain node, so that we can specify the quantity of dry and wet signal we're going to send to the speakers.
The audio graph will look like this (picture taken with the now-discontinued Firefox WebAudio debugger; you should get similar results with the Chrome Audion extension):
And here is the function which builds the graph:
function buildAudioGraphConvolver() {
// create the nodes
var source = audioContext.createMediaElementSource(playerConvolver);
convolverNode = audioContext.createConvolver();
// Set the buffer property of the convolver node with the decoded impulse
convolverNode.buffer = decodedImpulse;
convolverGain = audioContext.createGain();
convolverGain.gain.value = 0;
directGain = audioContext.createGain();
directGain.gain.value = 1;
// direct/dry route source -> directGain -> destination
source.connect(directGain);
directGain.connect(audioContext.destination);
// wet route with convolver: source -> convolver
// -> convolverGain -> destination
source.connect(convolverNode);
convolverNode.connect(convolverGain);
convolverGain.connect(audioContext.destination);
}
Note that we set the buffer property of the convolver node with the decoded impulse. We could not have done this before the impulse was loaded and decoded.
Definition: "The DynamicsCompressorNode interface provides a compression effect, which lowers the volume of the loudest parts of the signal in order to help prevent clipping and distortion that can occur when multiple sounds are played and multiplexed together at once. This is often used in musical production and game audio."
It's usually a good idea to insert a compressor in your audio graph to give a louder, richer and fuller sound, and to prevent clipping. See the Dynamics Compressor node's documentation.
HTML:
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>WebAudio example of compressor node</title>
</head>
<body>
<h1>Example of use of a compressor node</h1>
<p>Click the button to turn the compressor on/off. We set a huge gain on the signal on purpose, so that it clips and sounds saturated. Using a compressor with default property values will prevent clipping.</p>
<audio src="https://mainline.i3s.unice.fr/mooc/guitarRiff1.mp3" id="compressorExample" controls loop crossorigin="anonymous"></audio>
<br>
<label for="gainSlider1">Gain</label>
<input type="range" min="0" max="10" step="0.01" value="8" id="gainSlider1" />
<button id="compressorButton">Turn compressor On</button>
</body>
</html>
JavaScript:
let audioContext;
let compressorExample, gainSlider1, gainNode1, compressorNode;
let compressorButton;
let compressorOn = false;
window.onload = () => {
// get the AudioContext
audioContext = new AudioContext();
// the audio element
compressorExample = document.querySelector('#compressorExample');
gainSlider1 = document.querySelector('#gainSlider1');
// button for turning on/off the compressor
compressorButton = document.querySelector('#compressorButton');
// fix for autoplay policy
compressorExample.addEventListener('play',() => audioContext.resume());
buildAudioGraph();
// input listener on the gain slider
gainSlider1.oninput = (evt) => {
gainNode1.gain.value = evt.target.value;
};
compressorButton.onclick = (evt) => {
if(compressorOn) {
// disconnect the compressor and make a
// direct route from gain to destination
compressorNode.disconnect(audioContext.destination);
gainNode1.disconnect(compressorNode);
gainNode1.connect(audioContext.destination);
compressorButton.innerHTML = "Turn compressor On";
} else {
// compressor was off, we connect the gain to the compressor
// and the compressor to the destination
gainNode1.disconnect(audioContext.destination);
gainNode1.connect(compressorNode);
compressorNode.connect(audioContext.destination);
compressorButton.innerHTML = "Turn compressor Off";
}
compressorOn = !compressorOn;
};
};
function buildAudioGraph() {
// create source and gain node
let gainMediaElementSource = audioContext.createMediaElementSource(compressorExample);
gainNode1 = audioContext.createGain();
gainNode1.gain.value = parseFloat(gainSlider1.value);
// do not connect it yet
compressorNode = audioContext.createDynamicsCompressor();
// connect nodes together
gainMediaElementSource.connect(gainNode1);
gainNode1.connect(audioContext.destination);
}
In this example, we set the gain to a very high value that will make a saturated sound. To prevent clipping, it is sufficient to add a compressor right at the end of the graph! Here we use the compressor with all default settings.
NB: This course does not go into detail about the different properties of the compressor node (threshold, knee, ratio, attack, release), as they are mainly aimed at musicians and are used to shape subtle dynamic effects.
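For the curious, these properties are themselves AudioParams of the DynamicsCompressorNode, so adjusting them looks like this (a sketch; the values here are arbitrary, the defaults are usually fine):
// Sketch: adjusting the main DynamicsCompressorNode parameters
compressorNode.threshold.value = -50; // dB level above which compression kicks in
compressorNode.knee.value = 40;       // dB range of the smooth transition zone
compressorNode.ratio.value = 12;      // input change, in dB, needed for a 1 dB output change
compressorNode.attack.value = 0.003;  // seconds to reduce the gain by 10 dB
compressorNode.release.value = 0.25;  // seconds to increase the gain by 10 dB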
Audio graph with the compressor activated (picture taken with the now-discontinued Firefox WebAudio debugger; you should get similar results with the Chrome Audion extension):
There is nothing special here compared to the other examples in this section, except that we use a new method, disconnect(), which is available on all types of nodes (except audioContext.destination) and lets us modify the audio graph on the fly. When the button is clicked, the onclick listener adds or removes the compressor from the audio graph, and to achieve this, we disconnect and reconnect some of the nodes.