Access microphone from a browser - Javascript

Question

Is it possible to access the microphone (built-in or auxiliary) from a browser using client-side JavaScript?

Ideally, it would store the recorded audio in the browser. Thanks!

Answer

Here we capture microphone audio as Web Audio API event loop buffers using getUserMedia(). Time-domain and frequency-domain snippets of each audio event loop buffer are printed to the browser console (open it with F12 or Ctrl+Shift+I).

<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone audio into buffer</title>

<script type="text/javascript">


  var webaudio_tooling_obj = function () {

    // handle the prefixed constructor in older WebKit browsers
    var AudioContext = window.AudioContext || window.webkitAudioContext;
    var audioContext = new AudioContext();

    console.log("audio is starting up ...");

    var BUFF_SIZE = 16384;

    var audioInput = null,
        microphone_stream = null,
        gain_node = null,
        script_processor_node = null,
        script_processor_fft_node = null,
        analyserNode = null;

    // legacy callback-style getUserMedia, with vendor prefixes for older browsers
    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
                             navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia){

        navigator.getUserMedia({audio:true}, 
          function(stream) {
              start_microphone(stream);
          },
          function(e) {
            console.error(e); // surface the actual error, not just an alert
            alert('Error capturing audio.');
          }
        );

    } else { alert('getUserMedia not supported in this browser.'); }

    // ---

    function show_some_data(given_typed_array, num_row_to_display, label) {

        var size_buffer = given_typed_array.length;
        var index = 0;
        var max_index = num_row_to_display;

        console.log("__________ " + label);

        for (; index < max_index && index < size_buffer; index += 1) {

            console.log(given_typed_array[index]);
        }
    }

    function process_microphone_buffer(event) { // invoked by event loop

        var i, N, inp, microphone_output_buffer;

        microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now

        // microphone_output_buffer  <-- this buffer contains current gulp of data size BUFF_SIZE

        show_some_data(microphone_output_buffer, 5, "from getChannelData");
    }

    function start_microphone(stream){

      gain_node = audioContext.createGain();
      gain_node.connect( audioContext.destination );

      microphone_stream = audioContext.createMediaStreamSource(stream);
      microphone_stream.connect(gain_node); // mic is routed to the speakers - beware of feedback

      script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE, 1, 1);
      script_processor_node.onaudioprocess = process_microphone_buffer;

      microphone_stream.connect(script_processor_node);

      // --- enable volume control for output speakers

      document.getElementById('volume').addEventListener('change', function() {

          var curr_volume = parseFloat(this.value); // input value arrives as a string
          gain_node.gain.value = curr_volume;

          console.log("curr_volume ", curr_volume);
      });

      // --- setup FFT

      script_processor_fft_node = audioContext.createScriptProcessor(2048, 1, 1);
      // keep this node connected into the graph so its onaudioprocess keeps firing
      script_processor_fft_node.connect(gain_node);

      analyserNode = audioContext.createAnalyser();
      analyserNode.smoothingTimeConstant = 0;
      analyserNode.fftSize = 2048;

      microphone_stream.connect(analyserNode);

      analyserNode.connect(script_processor_fft_node);

      script_processor_fft_node.onaudioprocess = function() {

        // read the current frequency-domain data for the first channel
        var array = new Uint8Array(analyserNode.frequencyBinCount);
        analyserNode.getByteFrequencyData(array);

        // print a few bins - a real app would draw a spectrogram here instead
        show_some_data(array, 5, "from fft");
      };
    }

  }(); //  webaudio_tooling_obj = function()



</script>

</head>
<body>

    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>

</body>
</html>

Since this code exposes the microphone data as a buffer, you could add the ability to stream it using websockets, or simply aggregate each event loop buffer into one monster buffer and then download that monster to a file.
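
For instance, here is a minimal sketch of that aggregate-then-download idea. The names recorded_chunks and download_recording are introduced here for illustration and are not part of the answer above; it assumes you push a copy of each microphone_output_buffer inside process_microphone_buffer, then merges the chunks, encodes them as 16-bit mono PCM in a WAV container, and triggers a browser download:

var recorded_chunks = []; // inside process_microphone_buffer, add:
// recorded_chunks.push(new Float32Array(microphone_output_buffer));

function download_recording(sample_rate) {

    // merge every captured chunk into one long Float32Array
    var total = recorded_chunks.reduce(function(sum, c) { return sum + c.length; }, 0);
    var samples = new Float32Array(total);
    var offset = 0;
    recorded_chunks.forEach(function(c) { samples.set(c, offset); offset += c.length; });

    // 44-byte WAV header followed by 16-bit mono PCM data
    var view = new DataView(new ArrayBuffer(44 + samples.length * 2));

    function write_string(pos, s) {
        for (var i = 0; i < s.length; i++) view.setUint8(pos + i, s.charCodeAt(i));
    }

    write_string(0, 'RIFF');
    view.setUint32(4, 36 + samples.length * 2, true);
    write_string(8, 'WAVE');
    write_string(12, 'fmt ');
    view.setUint32(16, 16, true);               // fmt chunk size
    view.setUint16(20, 1, true);                // PCM format
    view.setUint16(22, 1, true);                // mono
    view.setUint32(24, sample_rate, true);
    view.setUint32(28, sample_rate * 2, true);  // byte rate
    view.setUint16(32, 2, true);                // block align
    view.setUint16(34, 16, true);               // bits per sample
    write_string(36, 'data');
    view.setUint32(40, samples.length * 2, true);

    // clamp floats to [-1, 1] and convert to signed 16-bit integers
    for (var i = 0; i < samples.length; i++) {
        var s = Math.max(-1, Math.min(1, samples[i]));
        view.setInt16(44 + i * 2, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }

    var a = document.createElement('a');
    a.href = URL.createObjectURL(new Blob([view], { type: 'audio/wav' }));
    a.download = 'recording.wav';
    a.click();
}

// usage: download_recording(audioContext.sampleRate);

Pass audioContext.sampleRate rather than a hard-coded rate so the WAV header matches the actual capture rate of your hardware.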

Note the call

    var audioContext = new AudioContext();

which indicates it is using the Web Audio API, which is baked into all modern browsers (including mobile browsers) and provides an extremely powerful audio platform of which tapping into the microphone is but a tiny fragment. NOTE: the CPU usage jumps up because this demo writes each event loop buffer to the browser console log, which is for testing only, so actual use is far less resource intensive even once you modify the code to stream audio elsewhere.
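
As an aside, the callback form of getUserMedia and the ScriptProcessorNode used in the answer above have since been deprecated. Here is a minimal sketch of the modern, promise-based equivalent; polling the AnalyserNode on a timer and using MediaRecorder to keep the recording in the browser are substitutions introduced here, not part of the original answer:

navigator.mediaDevices.getUserMedia({ audio: true })
  .then(function(stream) {

      var audioContext = new AudioContext();
      var source = audioContext.createMediaStreamSource(stream);

      // same AnalyserNode idea as above, polled on a timer
      // instead of a deprecated ScriptProcessorNode callback
      var analyser = audioContext.createAnalyser();
      analyser.fftSize = 2048;
      source.connect(analyser);

      var bins = new Uint8Array(analyser.frequencyBinCount);
      setInterval(function() {
          analyser.getByteFrequencyData(bins);
          console.log("first fft bin:", bins[0]);
      }, 500);

      // MediaRecorder stores the captured audio in the browser as a Blob,
      // which covers the "store the recorded audio" part of the question
      var recorder = new MediaRecorder(stream);
      var parts = [];
      recorder.ondataavailable = function(e) { parts.push(e.data); };
      recorder.onstop = function() {
          var blob = new Blob(parts, { type: recorder.mimeType });
          console.log("recorded", blob.size, "bytes of", blob.type);
      };
      recorder.start();
      setTimeout(function() { recorder.stop(); }, 5000); // record 5 seconds
  })
  .catch(function(e) {
      console.error('Error capturing audio.', e);
  });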

Some links to Web Audio API documentation: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API
