HTML5 voice recording with visualization


Problem description

I'm building an HTML5 application that records a voice, and while that voice is playing a visualizer should be in action.
Here is my code:

// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;

if (!navigator.getUserMedia) navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

if (navigator.getUserMedia) {
    navigator.getUserMedia({
        audio: true
    }, success, function (e) {
        alert('Error capturing audio.');
    });
} else alert('getUserMedia not supported in this browser.');

// when pressing record

function getVal(value) {

    // if R is pressed, we start recording
    if (value == "record") {
        recording = true;
        // reset the buffers for the new recording
        leftchannel.length = rightchannel.length = 0;
        recordingLength = 0;
        document.getElementById('output').innerHTML = "Recording now...";

        // if S is pressed, we stop the recording and package the WAV file
    } else if (value == "stop") {

        // we stop recording
        recording = false;
        document.getElementById('output').innerHTML = "Building wav file...";

        // we flat the left and right channels down
        var leftBuffer = mergeBuffers(leftchannel, recordingLength);
        var rightBuffer = mergeBuffers(rightchannel, recordingLength);
        // we interleave both channels together
        var interleaved = interleave(leftBuffer, rightBuffer);


        // allocate the WAV container: 44-byte header + 2 bytes per sample
        var buffer = new ArrayBuffer(44 + interleaved.length * 2);
        var view = new DataView(buffer);

        // RIFF chunk descriptor
        writeUTFBytes(view, 0, 'RIFF');
        view.setUint32(4, 44 + interleaved.length * 2, true);
        writeUTFBytes(view, 8, 'WAVE');
        // FMT sub-chunk
        writeUTFBytes(view, 12, 'fmt ');
        view.setUint32(16, 16, true); // sub-chunk size: 16 for PCM
        view.setUint16(20, 1, true); // audio format: 1 = linear PCM
        // stereo (2 channels)
        view.setUint16(22, 2, true);
        view.setUint32(24, sampleRate, true); // sample rate
        view.setUint32(28, sampleRate * 4, true); // byte rate = sampleRate * channels * bytes per sample
        view.setUint16(32, 4, true); // block align = channels * bytes per sample
        view.setUint16(34, 16, true); // bits per sample
        // data sub-chunk
        writeUTFBytes(view, 36, 'data');
        view.setUint32(40, interleaved.length * 2, true);


        // write the PCM samples: scale each float in [-1, 1] to a 16-bit signed integer
        var lng = interleaved.length;
        var index = 44;
        var volume = 1; // local scale factor (shadows the global gain-node variable)
        for (var i = 0; i < lng; i++) {
            view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
            index += 2;
        }


        var blob = new Blob([view], {
            type: 'audio/wav'
        });

        // let's save it locally

        document.getElementById('output').innerHTML = 'Handing off the file now...';
        var url = (window.URL || window.webkitURL).createObjectURL(blob);

        var li = document.createElement('li');
        var au = document.createElement('audio');
        var hf = document.createElement('a');

        au.controls = true;
        au.src = url;
        hf.href = url;
        hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
        hf.innerHTML = hf.download;
        li.appendChild(au);
        li.appendChild(hf);
        recordingList.appendChild(li);

    }
}


function success(e) {

    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();


    volume = context.createGain();

    // creates an audio node from the microphone incoming stream(source)
    source = context.createMediaStreamSource(e);

    // connect the stream(source) to the gain node
    source.connect(volume);


    var bufferSize = 2048;

    recorder = context.createScriptProcessor(bufferSize, 2, 2);
    //node for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 1024;

    analyser2 = context.createAnalyser();
    analyser2.smoothingTimeConstant = 0.0;
    analyser2.fftSize = 1024;

    splitter = context.createChannelSplitter();
    //when recording happens
    recorder.onaudioprocess = function (e) {
        if (!recording) return;
        var left = e.inputBuffer.getChannelData(0);
        var right = e.inputBuffer.getChannelData(1);

        // get the average of the first channel, bincount is fftsize / 2
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        var average = getAverageVolume(array);

        // get the average for the second channel
        var array2 = new Uint8Array(analyser2.frequencyBinCount);
        analyser2.getByteFrequencyData(array2);
        var average2 = getAverageVolume(array2);
        // clear the current state
        ctx.clearRect(0, 0, 60, 130);

        // set the fill style
        ctx.fillStyle = gradient;

        // create the meters
        ctx.fillRect(0, 130 - average, 25, 130);
        ctx.fillRect(30, 130 - average2, 25, 130);
    }

    function getAverageVolume(array) {
        var values = 0;
        var average;

        var length = array.length;

        // get all the frequency amplitudes
        for (var i = 0; i < length; i++) {
            values += array[i];
        }

        average = values / length;
        return average;
    }

    leftchannel.push(new Float32Array(left));
    rightchannel.push(new Float32Array(right));
    recordingLength += bufferSize;


}

// we connect the recorder (node) to the destination (speakers)
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
splitter.connect(analyser2, 1, 0);
analyser.connect(recorder);
recorder.connect(context.destination);

function mergeBuffers(channelBuffer, recordingLength) {
    var result = new Float32Array(recordingLength);
    var offset = 0;
    var lng = channelBuffer.length;
    for (var i = 0; i < lng; i++) {
        var buffer = channelBuffer[i];
        result.set(buffer, offset);
        offset += buffer.length;
    }
    return result;
}

function interleave(leftChannel, rightChannel) {
    var length = leftChannel.length + rightChannel.length;
    var result = new Float32Array(length);

    var inputIndex = 0;

    for (var index = 0; index < length;) {
        result[index++] = leftChannel[inputIndex];
        result[index++] = rightChannel[inputIndex];
        inputIndex++;
    }
    return result;
}


function writeUTFBytes(view, offset, string) {
    var lng = string.length;
    for (var i = 0; i < lng; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}

My problem is that when I run it, it gives me this error:

cannot read property 'connect' of null in this statement: volume.connect(splitter);

What is going wrong?

Answer

The volume gain node is created only after getUserMedia succeeds, inside the success function.

By the time the code reaches the volume.connect statement, volume has not yet been assigned.
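
To make the ordering concrete, here is a simplified sketch of what happens (errorCallback stands in for the error handler from the original code):

var volume = null;

navigator.getUserMedia({
    audio: true
}, function success(stream) {
    // runs later, only after the user grants microphone access
    volume = context.createGain();
    // ...
}, errorCallback);

// runs immediately, before the callback has fired, while volume is still null
volume.connect(splitter); // TypeError: cannot read property 'connect' of null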

You have to 'chain' all your node connections starting from success.

Quick fix: just put these lines:

// we connect the recorder (node) to the destination (speakers)
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
splitter.connect(analyser2, 1, 0);
analyser.connect(recorder);
recorder.connect(context.destination);

at the end of the success function.
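
For clarity, here is a minimal sketch of the reordered success function with the connections moved to its end. One assumption about the intended structure: the leftchannel.push / rightchannel.push / recordingLength lines reference left and right, which only exist inside the onaudioprocess handler, so the sketch moves them there as well.

function success(e) {
    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();

    volume = context.createGain();

    // creates an audio node from the microphone incoming stream (source)
    source = context.createMediaStreamSource(e);
    source.connect(volume);

    var bufferSize = 2048;
    recorder = context.createScriptProcessor(bufferSize, 2, 2);

    // nodes for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 1024;

    analyser2 = context.createAnalyser();
    analyser2.smoothingTimeConstant = 0.0;
    analyser2.fftSize = 1024;

    splitter = context.createChannelSplitter();

    recorder.onaudioprocess = function (e) {
        if (!recording) return;
        var left = e.inputBuffer.getChannelData(0);
        var right = e.inputBuffer.getChannelData(1);

        // clone and store the samples while left/right are in scope
        leftchannel.push(new Float32Array(left));
        rightchannel.push(new Float32Array(right));
        recordingLength += bufferSize;

        // ... visualizer drawing code as before ...
    };

    // the connections now run only after volume and the other nodes exist
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);
    splitter.connect(analyser2, 1, 0);
    analyser.connect(recorder);
    recorder.connect(context.destination);
}

This works because everything that touches volume, splitter, and the analysers now executes strictly after getUserMedia has succeeded and the nodes have been created.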

