The project calls for visualizing audio: a curve that moves with the sound, or bars like the ones in 唱吧 (Changba). At first I found the audio visualization component written by Tencent's TGideas team and planned to use it directly, but for various reasons I ended up writing my own... even though mine is clearly a lot cruder.
The TGideas audio component:
http://www.3fwork.com/b403/001620MYM013253/
GitHub:
https://github.com/tgideas/motion/blob/master/component/src/main/audio/audio.js
I also referred to the official Web Audio API docs:
https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API
and another article on audio visualization:
http://www.mizuiren.com/330.html
Note that audioContext.currentTime starts counting from the moment the audioContext is created, not from when playback starts, so the elapsed playback time is the current value minus the value recorded when start(0) is called.
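A rough sketch of what that means in practice (assuming an audioContext and a buffer source node already exist; the variable names are just placeholders):

var startAt = audioContext.currentTime;   // seconds since the context was created
source.start(0);
// ...later, e.g. inside a timer:
var elapsed = audioContext.currentTime - startAt;   // seconds since start(0) was called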
Code:
var Visualizer = function (config) {
    this.audioContext = null;
    this.analyser = null;
    this.source = null;      // the audio source node
    this.config = config;
    this.frequency = [];
    this.playing = false;
    this.ready = false;
    this.loadFailed = false;
};

Visualizer.prototype = {
    init: function () {
        this._prepare();
        this.getData();
        this._analyser();
    },
    _prepare: function () {
        // Create an audio context (window.AudioContext). Older Chrome builds only expose the
        // prefixed window.webkitAudioContext, so normalize the constructor name first so the
        // same code works everywhere.
        window.AudioContext = window.AudioContext || window.webkitAudioContext || window.mozAudioContext || window.msAudioContext;
        try {
            this.audioContext = new AudioContext();
        } catch (e) {
            console.log(e);
        }
    },
    _analyser: function () {
        var that = this;
        that.analyser = that.audioContext.createAnalyser();
        that.analyser.smoothingTimeConstant = 0.85;
        that.analyser.fftSize = 32;   // FFT size; frequencyBinCount is fftSize / 2, so frequency holds 16 values
        // connect the source to the analyser
        that.source.connect(that.analyser);
        // connect the analyser to the destination so the signal can reach the speakers
        that.analyser.connect(that.audioContext.destination);
        that.frequency = new Uint8Array(that.analyser.frequencyBinCount);
    },
    getData: function () {
        var that = this;
        // create the buffer source node
        that.source = that.audioContext.createBufferSource();
        var request = new XMLHttpRequest();
        // fetch the audio file
        request.open('GET', that.config.url, true);
        request.responseType = 'arraybuffer';
        request.onload = function () {
            if (request.status !== 200) {
                that.loadFailed = true;
                return;
            }
            // decode the compressed audio into a raw AudioBuffer
            that.audioContext.decodeAudioData(request.response, function (buffer) {
                that.source.buffer = buffer;
                // console.log(buffer.duration); // length of the track in seconds
                // that.source.connect(that.audioContext.destination);
                // (not needed: the source already reaches the destination, i.e. the speakers, through the analyser)
                that.source.loop = that.config.loop || false;
                // only report ready once the buffer has actually been decoded
                that.ready = true;
            }, function (e) {
                console.log("Error with decoding audio data", e);
                that.loadFailed = true;
            });
        };
        request.onerror = function () {
            that.loadFailed = true;
        };
        request.send();
    },
    play: function () {
        var that = this;
        // currentTime counts from context creation, so remember the moment playback starts
        var startTime = that.audioContext.currentTime;
        that.source.start(0);
        that.playing = true;
        var timer = setInterval(function () {
            that.analyser.getByteFrequencyData(that.frequency);
            if (that.source.buffer && !that.source.loop) {
                // stop once the elapsed time since start(0) exceeds the track length
                if (that.audioContext.currentTime - startTime > that.source.buffer.duration) {
                    that.source.stop(0);
                    that.playing = false;
                    clearInterval(timer);
                }
            }
        }, 100);
    },
    stop: function () {
        var that = this;
        that.source.stop(0);
        that.playing = false;
    }
};
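One practical caveat the component does not cover: recent Chrome versions create an AudioContext in the "suspended" state unless it is created from a user gesture, so nothing is audible (and the analyser produces no data) until the context is resumed. A minimal sketch, assuming a button with id "play-btn" exists on the page and v is a Visualizer instance like the one in the usage below:

document.getElementById('play-btn').addEventListener('click', function () {
    // resume() returns a Promise; only start playback once the context is actually running
    v.audioContext.resume().then(function () {
        v.play();
    });
});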
Usage:
var v = new Visualizer({
    url: "2.mp3",   // audio URL; no CORS handling is included, so it must be same-origin
    loop: false     // whether to loop playback
});
v.init();
var started = false;
var timer = setInterval(function () {
    if (v.loadFailed) {
        console.log("load failed");
        clearInterval(timer);
    } else if (v.ready && !started) {
        console.log("ready!");
        v.play();   // only start playback once the buffer has been decoded
        started = true;
    }
    if (v.playing) {                // playing tells whether the track is currently playing
        console.log(v.frequency);   // frequency is a 16-element array of frequency magnitudes (0-255)
    }
}, 100);
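Finally, to get the moving curve/bar effect mentioned at the top, the 16 values in v.frequency just need to be drawn on every frame. A minimal sketch (not part of the component above), assuming a <canvas id="wave" width="320" height="100"></canvas> element on the page and the v instance from the usage code:

var canvas = document.getElementById('wave');
var ctx = canvas.getContext('2d');
(function draw() {
    requestAnimationFrame(draw);
    if (!v.playing) { return; }
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    var barWidth = canvas.width / v.frequency.length;
    for (var i = 0; i < v.frequency.length; i++) {
        // each value is 0-255; scale it to the canvas height
        var h = v.frequency[i] / 255 * canvas.height;
        ctx.fillRect(i * barWidth, canvas.height - h, barWidth - 2, h);
    }
})();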