Create a waveform of the full track with Web Audio API


Problem Description



Realtime moving Waveform

I'm currently playing with the Web Audio API and made a spectrum using canvas.

function animate(){
 var a=new Uint8Array(analyser.frequencyBinCount),
     y=new Uint8Array(analyser.frequencyBinCount),b,c,d;
 analyser.getByteTimeDomainData(y);
 analyser.getByteFrequencyData(a);
 b=c=a.length;
 d=w/c;
 ctx.clearRect(0,0,w,h);
 while(b--){
  var bh=a[b]+1;
  ctx.fillStyle='hsla('+(b/c*240)+','+(y[b]/255*100|0)+'%,50%,1)';
  ctx.fillRect(1*b,h-bh,1,bh);
  ctx.fillRect(1*b,y[b],1,1);
 }
 animation=webkitRequestAnimationFrame(animate);
}

Mini question: is there a way to avoid writing new Uint8Array(analyser.frequencyBinCount) twice?
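
One way to avoid the double allocation is to create the two buffers once, outside the animation loop, and let the getByte* calls refill them on every frame. A minimal sketch (assuming the same analyser, ctx, w and h as in the fiddle):

// Allocate the two buffers once; analyser.frequencyBinCount stays the same
// as long as analyser.fftSize is not changed.
var freqData = new Uint8Array(analyser.frequencyBinCount);
var timeData = new Uint8Array(analyser.frequencyBinCount);

function animate(){
    // getByteFrequencyData / getByteTimeDomainData just fill the arrays
    // you pass in, so reusing them is safe.
    analyser.getByteFrequencyData(freqData);
    analyser.getByteTimeDomainData(timeData);
    var b = freqData.length;
    ctx.clearRect(0, 0, w, h);
    while (b--) {
        var bh = freqData[b] + 1;
        ctx.fillStyle = 'hsla(' + (b / freqData.length * 240) + ',' +
                        (timeData[b] / 255 * 100 | 0) + '%,50%,1)';
        ctx.fillRect(b, h - bh, 1, bh);
        ctx.fillRect(b, timeData[b], 1, 1);
    }
    animation = webkitRequestAnimationFrame(animate);
}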

DEMO

Add an MP3/MP4 file and wait. (Tested in Chrome.)

http://jsfiddle.net/pc76H/2/

But there are many problems. I can't find proper documentation for the various audio filters.

Also, if you look at the spectrum you will notice that after 70% or the range there is no data. What does that mean? that maybe from 16k hz to 20k hz is no sound? I would apply a text to the canvas to show the various HZ. but where??
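
For reference: each bin of getByteFrequencyData covers sampleRate / fftSize Hz, and the whole array spans 0 to sampleRate / 2 (22,050 Hz for 44.1 kHz audio), which is also why the top of the range is mostly empty for typical MP3s. A rough sketch (not from the original fiddle) of where Hz labels would land, assuming the same analyser, ctx, w and h, plus the AudioContext in a variable called audioCtx:

// Each bin i corresponds to i * audioCtx.sampleRate / analyser.fftSize Hz,
// so a label every 2 kHz can be placed like this:
function drawHzLabels(audioCtx, analyser, ctx, w, h){
    var hzPerBin = audioCtx.sampleRate / analyser.fftSize;   // Hz covered by one bin
    var pxPerBin = w / analyser.frequencyBinCount;           // canvas pixels per bin
    ctx.fillStyle = '#fff';
    for (var hz = 0; hz <= audioCtx.sampleRate / 2; hz += 2000) {
        var x = (hz / hzPerBin) * pxPerBin;
        ctx.fillText((hz / 1000) + 'k', x, h - 2);
    }
}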

I found out that the returned data length is a power of 2, from 32 up to a max of 2048, and the height is always 256.
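
That matches the spec: frequencyBinCount is always fftSize / 2, fftSize must be a power of two, and the getByte* arrays hold byte values, hence the fixed 0-255 "height". A quick check, assuming an AnalyserNode called analyser:

// fftSize must be a power of two; the data arrays are fftSize / 2 long
// and each value is a byte (0-255).
analyser.fftSize = 2048;
console.log(analyser.frequencyBinCount); // 1024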

BUT the real question is... I want to create a moving waveform like in Traktor.

I already did that some time ago with PHP: it converts the file to a low bitrate, then extracts the data and converts it to an image. I found the script somewhere... but I don't remember where... Note: it needs LAME.

<?php
$a=$_GET["f"];
if(file_exists($a)){
    if(file_exists($a.".png")){
        header("Content-Type: image/png");
        echo file_get_contents($a.".png");
    }else{
        $b=3000;$c=300;define("d",3);
        ini_set("max_execution_time","30000");
        function n($g,$h){
            $g=hexdec(bin2hex($g));
            $h=hexdec(bin2hex($h));
            return($g+($h*256));
        };
        $k=substr(md5(time()),0,10);
        copy(realpath($a),"/var/www/".$k."_o.mp3");
        exec("lame /var/www/{$k}_o.mp3 -f -m m -b 16 --resample 8 /var/www/{$k}.mp3 && lame --decode /var/www/{$k}.mp3 /var/www/{$k}.wav");
        //system("lame {$k}_o.mp3 -f -m m -b 16 --resample 8 {$k}.mp3 && lame --decode {$k}.mp3 {$k}.wav");
        @unlink("/var/www/{$k}_o.mp3");
        @unlink("/var/www/{$k}.mp3");
        $l="/var/www/{$k}.wav";
        $m=fopen($l,"r");
        $n[]=fread($m,4);
        $n[]=bin2hex(fread($m,4));
        $n[]=fread($m,4);
        $n[]=fread($m,4);
        $n[]=bin2hex(fread($m,4));
        $n[]=bin2hex(fread($m,2));
        $n[]=bin2hex(fread($m,2));
        $n[]=bin2hex(fread($m,4));
        $n[]=bin2hex(fread($m,4));
        $n[]=bin2hex(fread($m,2));
        $n[]=bin2hex(fread($m,2));
        $n[]=fread($m,4);
        $n[]=bin2hex(fread($m,4));
        $o=hexdec(substr($n[10],0,2));
        $p=$o/8;
        $q=hexdec(substr($n[6],0,2));
        if($q==2){$r=40;}else{$r=80;};
        while(!feof($m)){
            $t=array();
            for($i=0;$i<$p;$i++){
                $t[$i]=fgetc($m);
            };
            switch($p){
                case 1:$s[]=n($t[0],$t[1]);break;
                case 2:if(ord($t[1])&128){$u=0;}else{$u=128;};$u=chr((ord($t[1])&127)+$u);$s[]= floor(n($t[0],$u)/256);break;
            };
            fread($m,$r);
        };
        fclose($m);
        unlink("/var/www/{$k}.wav");
        $x=imagecreatetruecolor(sizeof($s)/d,$c);
        imagealphablending($x,false);
        imagesavealpha($x,true);
        $y=imagecolorallocatealpha($x,255,255,255,127);
        imagefilledrectangle($x,0,0,sizeof($s)/d,$c,$y);
        for($d=0;$d<sizeof($s);$d+=d){
            $v=(int)($s[$d]/255*$c);
            imageline($x,$d/d,0+($c-$v),$d/d,$c-($c-$v),imagecolorallocate($x,255,0,255));
        };
        $z=imagecreatetruecolor($b,$c);
        imagealphablending($z,false);
        imagesavealpha($z,true);
        imagefilledrectangle($z,0,0,$b,$c,$y);
        imagecopyresampled($z,$x,0,0,0,0,$b,$c,sizeof($s)/d,$c);
        imagepng($z,realpath($a).".png");
        header("Content-Type: image/png");
        imagepng($z);
        imagedestroy($z);
    };
}else{
    echo $a;
};

?>

The script works... but you are limited to a maximum image size of 4k pixels.

So you don't get a nice waveform if it should represent only a few milliseconds.

What do I need to store/create a realtime waveform like the Traktor app or this PHP script? By the way, Traktor also has a colored waveform (the PHP script does not).

EDIT

I rewrote your script so that it fits my idea... it's relatively fast.

As you can see, inside the function createArray I push the various lines into an object with the x coordinate as the key.

I'm simply taking the highest number.

Here is where we could play with the colors.

var ajaxB,AC,B,LC,op,x,y,ARRAY={},W=1024,H=256;
var aMax=Math.max.apply.bind(Math.max, Math);
function error(a){
 console.log(a);
};
function createDrawing(){
 console.log('drawingArray');
 var C=document.createElement('canvas');
 C.width=W;
 C.height=H;
 document.body.appendChild(C);
 var context=C.getContext('2d');
 context.save();
 context.strokeStyle='#121';
 context.globalCompositeOperation='lighter';
 L2=W*1;
 while(L2--){
  context.beginPath();
  context.moveTo(L2,0);
  context.lineTo(L2+1,ARRAY[L2]);
  context.stroke();
 }
 context.restore();
};
function createArray(a){
 console.log('creatingArray');
 B=a;
 LC=B.getChannelData(0);// Float32Array describing left channel
 L=LC.length;  
 op=W/L;
 for(var i=0;i<L;i++){
  x=W*i/L|0;
  y=LC[i]*H/2;
  if(ARRAY[x]){
   ARRAY[x].push(y)
  }else{
   !ARRAY[x-1]||(ARRAY[x-1]=aMax(ARRAY[x-1]));
   // the above line contains an array of values
   // which could be converted to a color 
   // or just simply create a gradient 
   // based on avg max min (frequency???) whatever
   ARRAY[x]=[y]
  }
 };
 createDrawing();
};
function decode(){
 console.log('decodingMusic');
 AC=new webkitAudioContext
 AC.decodeAudioData(this.response,createArray,error);
};
function loadMusic(url){
 console.log('loadingMusic');   
 ajaxB=new XMLHttpRequest;
 ajaxB.open('GET',url);
 ajaxB.responseType='arraybuffer';    
 ajaxB.onload=decode;
 ajaxB.send();
}
loadMusic('AudioOrVideo.mp4');

Solution

OK, so what I would do is load the sound with an XMLHttpRequest, then decode it using Web Audio, then display it "carefully" to get the colors you are searching for.

I just made a quick version, copy-pasting from various projects of mine; it works reasonably well, as you might see in this picture:

The issue is that it is slow as hell. To get (more) decent speed, you'll have to do some computation to reduce the number of lines drawn on the canvas, because at 44,100 samples per second you very quickly get too many lines to draw.

// AUDIO CONTEXT
window.AudioContext = window.AudioContext || window.webkitAudioContext ;

if (!AudioContext) alert('This site cannot be run in your Browser. Try a recent Chrome or Firefox. ');

var audioContext = new AudioContext();
var currentBuffer  = null;

// CANVAS
var canvasWidth = 512,  canvasHeight = 120 ;
var newCanvas   = createCanvas (canvasWidth, canvasHeight);
var context     = null;

window.onload = appendCanvas;
function appendCanvas() { document.body.appendChild(newCanvas);
                          context = newCanvas.getContext('2d'); }

// MUSIC LOADER + DECODE
function loadMusic(url) {   
    var req = new XMLHttpRequest();
    req.open( "GET", url, true );
    req.responseType = "arraybuffer";    
    req.onreadystatechange = function (e) {
          if (req.readyState == 4) {
             if(req.status == 200)
                  audioContext.decodeAudioData(req.response, 
                    function(buffer) {
                             currentBuffer = buffer;
                             displayBuffer(buffer);
                    }, onDecodeError);
             else
                  alert('error during the load.Wrong url or cross origin issue');
          }
    } ;
    req.send();
}

function onDecodeError() {  alert('error while decoding your file.');  }

// MUSIC DISPLAY
function displayBuffer(buff /* is an AudioBuffer */) {
   var leftChannel = buff.getChannelData(0); // Float32Array describing left channel     
   var lineOpacity = canvasWidth / leftChannel.length  ;      
   context.save();
   context.fillStyle = '#222' ;
   context.fillRect(0,0,canvasWidth,canvasHeight );
   context.strokeStyle = '#121';
   context.globalCompositeOperation = 'lighter';
   context.translate(0,canvasHeight / 2);
   context.globalAlpha = 0.06 ; // lineOpacity ;
   for (var i=0; i<  leftChannel.length; i++) {
       // on which line do we get ?
       var x = Math.floor ( canvasWidth * i / leftChannel.length ) ;
       var y = leftChannel[i] * canvasHeight / 2 ;
       context.beginPath();
       context.moveTo( x  , 0 );
       context.lineTo( x+1, y );
       context.stroke();
   }
   context.restore();
   console.log('done');
}

function createCanvas ( w, h ) {
    var newCanvas = document.createElement('canvas');
    newCanvas.width  = w;     newCanvas.height = h;
    return newCanvas;
};


loadMusic('could_be_better.mp3');

Edit: The issue here is that we have too much data to draw. Take a 3-minute mp3: you'll have 3*60*44100 ≈ 8,000,000 lines to draw. On a display with, say, 1024 px of horizontal resolution, that makes about 8,000 lines per pixel...
In the code above, the canvas is doing the "resampling" by drawing lines with low opacity in 'lighter' composite mode (i.e. the pixels' r, g, b values add up).
To speed things up, you have to resample yourself, but to get some colors it's not just a down-sampling: you'll have to handle a set of "buckets" (most probably in a typed array), one for each horizontal pixel (so, say, 1024), and in every bucket compute the cumulated sound pressure, the variance, the min and the max; then, at display time, you decide how to render that with colors.
For instance:
values between 0 and positiveMin are very light (every sample is above that point),
values between positiveMin and positiveAverage - variance are darker,
values between positiveAverage - variance and positiveAverage + variance are darker,
and values between positiveAverage + variance and positiveMax are lighter.
(Same for negative values.) That makes 5 colors for each bucket, and it's still quite some work, both for you to code and for the browser to compute; a sketch of this per-bucket shading follows below.
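
Here is a minimal sketch of that shading for the positive half of one bucket (my illustration, not code from this answer); the negative half would mirror it below the axis. It assumes per-bucket stats posMin, posAvg, meanVar and posMax have already been collected, e.g. by extending the resampled buckets in displayBuffer2 below:

// Hypothetical helper: draw the positive side of one bucket as four vertical
// bands, using whatever shades you pick for the scheme described above.
function drawPositiveBands(ctx, x, posMin, posAvg, meanVar, posMax, scaleY, shades){
    // band edges, from the zero line up to the bucket's positive peak
    var edges = [0, posMin, posAvg - meanVar, posAvg + meanVar, posMax];
    for (var b = 0; b < 4; b++) {
        ctx.strokeStyle = shades[b];          // e.g. ['#FEE', '#A33', '#822', '#F88']
        ctx.beginPath();
        ctx.moveTo(x, edges[b] * scaleY);
        ctx.lineTo(x, edges[b + 1] * scaleY);
        ctx.stroke();
    }
}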
I don't know if the performance can get decent with this, but I fear that the statistical accuracy and the color coding of the software you mention can't be reached in a browser (obviously not in real time), and that you'll have to make some compromises.
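
One such compromise is a plain min/max-per-pixel downsample: it loses the color statistics but only draws one line per horizontal pixel. A minimal sketch (my addition, assuming the same canvasWidth, canvasHeight and context as above):

// MUSIC DISPLAY, min/max variant: one (min, max) pair per horizontal pixel
function displayBufferMinMax(buff /* is an AudioBuffer */) {
    var leftChannel = buff.getChannelData(0);
    var samplesPerPixel = Math.floor(leftChannel.length / canvasWidth);
    context.save();
    context.fillStyle = '#222';
    context.fillRect(0, 0, canvasWidth, canvasHeight);
    context.strokeStyle = '#8F8';
    context.translate(0.5, canvasHeight / 2);
    for (var x = 0; x < canvasWidth; x++) {
        var start = x * samplesPerPixel, min = 1, max = -1;
        for (var i = 0; i < samplesPerPixel; i++) {
            var v = leftChannel[start + i];
            if (v < min) min = v;
            if (v > max) max = v;
        }
        // one vertical line spanning the bucket's min..max amplitude
        context.beginPath();
        context.moveTo(x, min * canvasHeight / 2);
        context.lineTo(x, max * canvasHeight / 2);
        context.stroke();
    }
    context.restore();
}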

Edit 2 :
I tried to get some colors out of the stats but it quite failed. My guess, now, is that the Traktor guys also change the color depending on frequency... quite some work there...

Anyway, just for the record, the code for an average / mean variation follows.
(The variance was too low; I had to use mean variation.)

// MUSIC DISPLAY
function displayBuffer2(buff /* is an AudioBuffer */) {
   var leftChannel = buff.getChannelData(0); // Float32Array describing left channel       
   // we 'resample' with cumul, count, variance
   // Offset 0 : PositiveCumul  1: PositiveCount  2: PositiveVariance
   //        3 : NegativeCumul  4: NegativeCount  5: NegativeVariance
   // that makes 6 data per bucket
   var resampled = new Float64Array(canvasWidth * 6 );
   var i=0, j=0, buckIndex = 0;
   var min=1e3, max=-1e3;
   var thisValue=0, res=0;
   var sampleCount = leftChannel.length;
   // first pass for mean
   for (i=0; i<sampleCount; i++) {
        // in which bucket do we fall ?
        buckIndex = 0 | ( canvasWidth * i / sampleCount );
        buckIndex *= 6;
        // positive or negative ?
        thisValue = leftChannel[i];
        if (thisValue>0) {
            resampled[buckIndex    ] += thisValue;
            resampled[buckIndex + 1] +=1;               
        } else if (thisValue<0) {
            resampled[buckIndex + 3] += thisValue;
            resampled[buckIndex + 4] +=1;                           
        }
        if (thisValue<min) min=thisValue;
        if (thisValue>max) max = thisValue;
   }
   // compute mean now
   for (i=0, j=0; i<canvasWidth; i++, j+=6) {
       if (resampled[j+1] != 0) {
             resampled[j] /= resampled[j+1]; ;
       }
       if (resampled[j+4]!= 0) {
             resampled[j+3] /= resampled[j+4];
       }
   }
   // second pass for mean variation  ( variance is too low)
   for (i=0; i<leftChannel.length; i++) {
        // in which bucket do we fall ?
        buckIndex = 0 | (canvasWidth * i / leftChannel.length );
        buckIndex *= 6;
        // positive or negative ?
        thisValue = leftChannel[i];
        if (thisValue>0) {
            resampled[buckIndex + 2] += Math.abs( resampled[buckIndex] - thisValue );               
        } else  if (thisValue<0) {
            resampled[buckIndex + 5] += Math.abs( resampled[buckIndex + 3] - thisValue );                           
        }
   }
   // compute mean variation/variance now
   for (i=0, j=0; i<canvasWidth; i++, j+=6) {
        if (resampled[j+1]) resampled[j+2] /= resampled[j+1];
        if (resampled[j+4]) resampled[j+5] /= resampled[j+4];   
   }
   context.save();
   context.fillStyle = '#000' ;
   context.fillRect(0,0,canvasWidth,canvasHeight );
   context.translate(0.5,canvasHeight / 2);   
  context.scale(1, 200);

   for (var i=0; i< canvasWidth; i++) {
        j=i*6;
       // draw from positiveAvg - variance to negativeAvg - variance 
       context.strokeStyle = '#F00';
       context.beginPath();
       context.moveTo( i  , (resampled[j] - resampled[j+2] ));
       context.lineTo( i  , (resampled[j +3] + resampled[j+5] ) );
       context.stroke();
       // draw from positiveAvg - variance to positiveAvg + variance 
       context.strokeStyle = '#FFF';
       context.beginPath();
       context.moveTo( i  , (resampled[j] - resampled[j+2] ));
       context.lineTo( i  , (resampled[j] + resampled[j+2] ) );
       context.stroke();
       // draw from negativeAvg + variance to negativeAvg - variance 
       // context.strokeStyle = '#FFF';
       context.beginPath();
       context.moveTo( i  , (resampled[j+3] + resampled[j+5] ));
       context.lineTo( i  , (resampled[j+3] - resampled[j+5] ) );
       context.stroke();
   }
   context.restore();
   console.log('done 231 iyi');
}
