For a cross-platform app project built with the Meteor framework, I want to record microphone input and extract the speech from it using the Google Speech API.

Following the Google documentation, I am specifically trying to build an audio stream that feeds the Google Speech client.

On the client side, the recording button triggers the following startCapture function (based on the cordova-plugin-audioinput):

export var startCapture = function () {
  try {
    if (window.audioinput && !audioinput.isCapturing()) {

      // Automatically stop the capture after 20 seconds
      setTimeout(stopCapture, 20000);

      var captureCfg = {
        sampleRate: 16000,
        bufferSize: 2048
      };
      audioinput.start(captureCfg);
    }
  }
  catch (e) {
    console.error(e);
  }
};
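For reference, the stopCapture scheduled by the timeout is just a thin wrapper around the plugin's stop call; a minimal sketch, assuming the plugin's documented audioinput.stop() and audioinput.isCapturing() API:

export var stopCapture = function () {
  // Stop the capture if the plugin is currently recording
  if (window.audioinput && audioinput.isCapturing()) {
    audioinput.stop();
  }
};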

The audioinput event lets me retrieve chunks of audio data as they are recorded:

window.addEventListener('audioinput', onAudioInputCapture, false);

var audioDataQueue = [];
function onAudioInputCapture(evt) {
  try {
    if (evt && evt.data) {
      // Push the captured chunk to the audio queue (array)
      audioDataQueue.push(evt.data);

      // Here should probably be a call to a Meteor server method?
    }
  }
  catch (e) {
    console.error(e);
  }
}
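As for the call hinted at in that comment, one option I am considering is to batch the queued chunks and flush them to the server periodically with a Meteor method call. A sketch of that idea (the 500 ms interval is arbitrary, and 'feedGoogleSpeech' is the server method shown further down):

// Sketch: periodically flush the queued audio chunks to the server.
setInterval(function () {
  if (audioDataQueue.length > 0) {
    // Drain the queue and send the whole batch in one method call
    var batch = audioDataQueue.splice(0, audioDataQueue.length);
    Meteor.call('feedGoogleSpeech', batch);
  }
}, 500);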

What I am struggling with is turning the recorded audio data into a ReadableStream that I can pipe into the Google Speech API client on the server side. My server-side recognize stream is set up like this:

const speech = require('@google-cloud/speech');

const client = new speech.SpeechClient();
const request = {
  config: {
    encoding: 'LINEAR16',
    sampleRateHertz: 16000,
    languageCode: 'en-US',
  },
  interimResults: true,
};

export const recognizeStream = client
  .streamingRecognize(request)
  .on('error', console.error)
  .on('data', data => console.log(data.results));
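In the Google samples, audio is simply piped into this duplex stream, e.g. raw LINEAR16 audio read from a file:

const fs = require('fs');

// As in the Google samples: pipe raw LINEAR16 audio into the recognize stream
// ('./audio.raw' is just a placeholder file name here).
fs.createReadStream('./audio.raw').pipe(recognizeStream);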

I have tried the following, but it does not feel like the right way to do it:

const Stream = require('stream');

var serverAudioDataQueue = [];
const readable = new Stream.Readable({
  objectMode: true,
});
readable._read = function (n) {
  // Drain whatever the client has sent so far
  this.push(serverAudioDataQueue.splice(0, serverAudioDataQueue.length));
};
readable.pipe(recognizeStream);

Meteor.methods({
  'feedGoogleSpeech': function (data) {
    data.forEach(item => serverAudioDataQueue.push(item));
  },
  // ...
});
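An alternative I am considering, which avoids the pull-based Readable entirely: since the stream returned by streamingRecognize is writable, the Meteor method could convert each incoming batch to a LINEAR16 Buffer and write it straight to recognizeStream. A sketch, assuming the client sends arrays of floats normalized to [-1, 1] (the plugin's default output):

// Convert an array of floats in [-1, 1] to a 16-bit signed little-endian
// PCM Buffer, which is what the LINEAR16 encoding expects.
function floatsToLinear16(floats) {
  const buffer = Buffer.alloc(floats.length * 2);
  floats.forEach(function (sample, i) {
    // Clamp, scale to the Int16 range, and write in little-endian order
    const s = Math.max(-1, Math.min(1, sample));
    buffer.writeInt16LE(Math.round(s * 32767), i * 2);
  });
  return buffer;
}

Meteor.methods({
  'feedGoogleSpeech': function (data) {
    // Write the converted batch straight into the recognize stream
    recognizeStream.write(floatsToLinear16(data));
  },
});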

Any insight on this?