Mixing two audio buffers: putting one in the background of another using the Web Audio API


Question

I want to mix two audio sources into a single source, putting one song as the background of the other.

For example, I have this input:

<input id="files" type="file" name="files[]" multiple onchange="handleFilesSelect(event)"/>

And a script to decode these files:

window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new window.AudioContext();
var sources = [];
var files = [];
var mixed = {};

function handleFilesSelect(event){
    if(event.target.files.length <= 1)
        return false;

    files = event.target.files;
    readFiles(0, mixAudioSources);
}

function readFiles(index, callback){
    var freader = new FileReader();
    var i = index ? index : 0;

    freader.onload = function (e) {     
        context.decodeAudioData(e.target.result, function (buf) {

            sources[i] = context.createBufferSource();
            sources[i].connect(context.destination);
            sources[i].buffer = buf;

            if(files.length > i+1){
                readFiles(i + 1, callback);
            } else {
                if(callback){
                    callback();
                }
            }
        });
    };

    freader.readAsArrayBuffer(files[i]);
}

function mixAudioSources(){
    // At this point we have two decoded audio sources in the "sources" array.
    // How can we mix those "sources" into the "mixed" variable, putting "sources[0]" in the background of "sources[1]"?
}

So how can I mix these sources into one? For example, if I have two files, how can I put one source in the background of the other and combine them into a single source?

Another scenario: if I read an input stream from a microphone, for example, and I want to layer that input over a background song (a kind of karaoke), is it possible to do this on the client with HTML5 support? What about performance? Or would it be better to mix these audio sources on the server side?
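For the microphone case, the rough, untested sketch below is the kind of thing I have in mind (backgroundBuffer is a placeholder for an already decoded AudioBuffer; it is not defined in the code above):

function mixMicWithBackground(backgroundBuffer) {
  // Ask for microphone access, then mix the mic input with the background track
  return navigator.mediaDevices.getUserMedia({ audio: true })
    .then(function(stream) {
      var ctx = new AudioContext();
      var mic = ctx.createMediaStreamSource(stream);
      var song = ctx.createBufferSource();
      song.buffer = backgroundBuffer;

      // Both the mic and the song feed the same MediaStreamDestination,
      // whose stream can then be recorded with MediaRecorder
      var destination = ctx.createMediaStreamDestination();
      mic.connect(destination);
      song.connect(destination);
      song.connect(ctx.destination); // so the singer can hear the background track

      var recorder = new MediaRecorder(destination.stream);
      var chunks = [];
      recorder.ondataavailable = function(event) {
        chunks.push(event.data);
      };
      recorder.onstop = function() {
        var blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
        console.log("mixed karaoke recording", blob);
      };

      recorder.start();
      song.start(0);
      // Stop recording when the background track ends
      song.onended = function() {
        recorder.stop();
      };
    });
}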

If it is possible, what would a possible implementation of the mixAudioSources function look like?

Thanks.

Answer

Two approaches, originally posted at Is it possible to mix multiple audio files on top of each other preferably with javascript, adjusted here to process File objects at the change event of an <input type="file"> element.

The first approach uses OfflineAudioContext(), AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination(), the Promise constructor, Promise.all(), and MediaRecorder() to mix the audio tracks, then offers the mixed audio file for download.

var div = document.querySelector("div");

function handleFilesSelect(input) {
  div.innerHTML = "loading audio tracks.. please wait";
  var files = Array.from(input.files);
  var duration = 60000;
  var chunks = [];
  var audio = new AudioContext();
  var mixedAudio = audio.createMediaStreamDestination();
  var player = new Audio();
  var context;
  var recorder;
  var description = "";
  
  player.controls = "controls";
  
  function get(file) {
    description += file.name.replace(/\..*|\s+/g, "");
    return new Promise(function(resolve, reject) {
      var reader = new FileReader;
      reader.readAsArrayBuffer(file);
      reader.onload = function() {
        resolve(reader.result)
      }
    })
  }

  function stopMix(duration, ...media) {
    setTimeout(function(media) {
      media.forEach(function(node) {
        node.stop()
      })
    }, duration, media)
  }

  Promise.all(files.map(get)).then(function(data) {
      var len = Math.max.apply(Math, data.map(function(buffer) {
        return buffer.byteLength
      }));
      context = new OfflineAudioContext(2, len, 44100);
      return Promise.all(data.map(function(buffer) {
          return audio.decodeAudioData(buffer)
            .then(function(bufferSource) {
              var source = context.createBufferSource();
              source.buffer = bufferSource;
              source.connect(context.destination);
              return source.start()
            })
        }))
        .then(function() {
          return context.startRendering()
        })
        .then(function(renderedBuffer) {
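          // renderedBuffer holds the offline mix of all the decoded tracks;
          // play it back while recording the mixed stream with MediaRecorder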
          return new Promise(function(resolve) {
            var mix = audio.createBufferSource();
            mix.buffer = renderedBuffer;
            mix.connect(audio.destination);
            mix.connect(mixedAudio);
            recorder = new MediaRecorder(mixedAudio.stream);
            recorder.start(0);
            mix.start(0);
            div.innerHTML = "playing and recording tracks..";
            // stop playback and recorder in 60 seconds
            stopMix(duration, mix, recorder)

            recorder.ondataavailable = function(event) {
              chunks.push(event.data);
            };

            recorder.onstop = function(event) {
              var blob = new Blob(chunks, {
                "type": "audio/ogg; codecs=opus"
              });
              console.log("recording complete");
              resolve(blob)
            };
          })
        })
        .then(function(blob) {
          console.log(blob);
          div.innerHTML = "mixed audio tracks ready for download..";
          var audioDownload = URL.createObjectURL(blob);
          var a = document.createElement("a");
          a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
          a.href = audioDownload;
          a.innerHTML = a.download;
          document.body.appendChild(a);
          a.insertAdjacentHTML("afterend", "<br>");
          player.src = audioDownload;
          document.body.appendChild(player);
        })
    })
    .catch(function(e) {
      console.log(e)
    });

}

<!DOCTYPE html>
<html>

<head>
</head>

<body>
  <input id="files" 
         type="file" 
         name="files[]" 
         accept="audio/*" 
         multiple 
         onchange="handleFilesSelect(this)" />
  <div></div>
</body>

</html>

The second approach uses AudioContext.createChannelMerger() and AudioContext.createChannelSplitter() to combine the decoded tracks into a single merged output.

var div = document.querySelector("div");

function handleFilesSelect(input) {

  div.innerHTML = "loading audio tracks.. please wait";
  var files = Array.from(input.files);
  var chunks = [];
  var channels = [
    [0, 1],
    [1, 0]
  ];
  var audio = new AudioContext();
  var player = new Audio();
  var merger = audio.createChannelMerger(2);
  var splitter = audio.createChannelSplitter(2);
  var mixedAudio = audio.createMediaStreamDestination();
  var duration = 60000;
  var context;
  var recorder;
  var audioDownload;
  var description = "";

  player.controls = "controls";

  function get(file) {
    description += file.name.replace(/\..*|\s+/g, "");
    console.log(description);
    return new Promise(function(resolve, reject) {
      var reader = new FileReader;
      reader.readAsArrayBuffer(file);
      reader.onload = function() {
        resolve(reader.result)
      }
    })
  }

  function stopMix(duration, ...media) {
    setTimeout(function(media) {
      media.forEach(function(node) {
        node.stop()
      })
    }, duration, media)
  }

  Promise.all(files.map(get)).then(function(data) {
      return Promise.all(data.map(function(buffer, index) {
          return audio.decodeAudioData(buffer)
            .then(function(bufferSource) {
              var channel = channels[index];
              var source = audio.createBufferSource();
              source.buffer = bufferSource;
              source.connect(splitter);
              splitter.connect(merger, channel[0], channel[1]);          
              return source
            })
        }))
        .then(function(audionodes) {
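          // every source is now routed through the splitter into the merger;
          // connect the merger to the recording destination and the speakers,
          // then start playback and recording together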
          merger.connect(mixedAudio);
          merger.connect(audio.destination);
          recorder = new MediaRecorder(mixedAudio.stream);
          recorder.start(0);
          audionodes.forEach(function(node, index) {
            node.start(0)
          });
          
          div.innerHTML = "playing and recording tracks..";
          
          stopMix(duration, ...audionodes, recorder);

          recorder.ondataavailable = function(event) {
            chunks.push(event.data);
          };

          recorder.onstop = function(event) {
            var blob = new Blob(chunks, {
              "type": "audio/ogg; codecs=opus"
            });
            audioDownload = URL.createObjectURL(blob);
            var a = document.createElement("a");
            a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
            a.href = audioDownload;
            a.innerHTML = a.download;
            player.src = audioDownload;
            document.body.appendChild(a);
            document.body.appendChild(player);
          };
        })
    })
    .catch(function(e) {
      console.log(e)
    });
}

<!DOCTYPE html>
<html>

<head>
</head>

<body>
  <input id="files" 
         type="file" 
         name="files[]" 
         accept="audio/*" 
         multiple onchange="handleFilesSelect(this)" />
  <div></div>
</body>

</html>
