Microphone broadcast using socket.io-stream

Date: 2018-09-21 03:18:03

Tags: socket.io streaming audio-streaming broadcast microphone

I was following this tutorial, binaryjs mic streaming, and I did the same thing with socket.io using socket.io-stream. The problem is that the sound I hear is very loud and noisy. Here is my code; I think the problem is in the playCache function, which doesn't read the buffers the way I want.

I don't want to use WebRTC. I know it's better suited for this than WebSockets, but for now I want to stick with WebSockets.

    // server side code
    const ss = require('socket.io-stream');
    // `sio` is the socket.io server instance created elsewhere

    sio.of('/').on('connection', function (socketClient) {

        ss(socketClient).on('talking', function (stream, data) {
            // meant to send to all including me, but this emits only
            // back to the sending socket (see the note below)
            let send = ss.createStream();
            stream.pipe(send);
            ss(socketClient).emit('receive-voice', send);
        });

    });
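
Note: the original comment said "send to all including me", but `ss(socketClient).emit(...)` only emits back to the sending socket. A minimal sketch of a true broadcast, assuming Socket.IO 2.x where `nsp.connected` maps socket ids to connected sockets, could look like this:

    // Sketch only: create one outgoing socket.io-stream per connected
    // client (sender included) and pipe the incoming stream into each.
    sio.of('/').on('connection', function (socketClient) {
        ss(socketClient).on('talking', function (stream, data) {
            const clients = sio.of('/').connected; // Socket.IO 2.x API
            Object.keys(clients).forEach(function (id) {
                let send = ss.createStream();
                ss(clients[id]).emit('receive-voice', send);
                stream.pipe(send); // a readable may pipe to many writables
            });
        });
    });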

    // client side code

    import io from 'socket.io-client';
    import ss from 'socket.io-stream';

    export let socketIO;

    let bufferSize = 2048;
    let AudioContext;
    let context;
    let processor;
    let input;
    let globalStream;

    const constraintsAudio = {
        audio: true,
        video: false
    };

    let recorderBroadcast;
    let startTime = 0;
    let soundController = {};

    const SpeakerContext = window.AudioContext || window.webkitAudioContext;

    soundController.speakerContext = new SpeakerContext();

    soundController.playCache = (cache) => {
        while (cache.length) {
            let buffer = cache.shift();
            let source = soundController.speakerContext.createBufferSource();
            source.buffer = buffer;
            source.connect(soundController.speakerContext.destination);
            if (soundController.nextTime === 0) {
                // add a delay of 0.05 seconds before the first chunk
                soundController.nextTime = soundController.speakerContext.currentTime + 0.05;
            }
            source.start(soundController.nextTime);
            // schedule buffers to be played consecutively
            soundController.nextTime += source.buffer.duration;
        }
    };


    export const socketIOConnect = () => {

        socketIO = io({
            reconnection: true,
            reconnectionDelay: 1000,
            reconnectionDelayMax: 5000,
            reconnectionAttempts: Infinity
        });
        socketIO.open();

        socketIO.on('connect', () => {

            console.log('client side connected!!');

            soundController.nextTime = 0;
            let init = false;
            let audioCache = [];

            ss(socketIO).on('receive-voice', function (stream, data) {

                stream.on('data', function (chunk) {
                    console.log(chunk);

                    // let array = new Float32Array(convertFloat32ToInt16(converteUInt8ArrayToFloat32(chunk))); // using this line I hear voice with noise
                    let array = new Float32Array(chunk);
                    let buffer = soundController.speakerContext.createBuffer(1, bufferSize, 44100);
                    buffer.copyToChannel(array, 0);

                    audioCache.push(buffer);
                    // make sure we put at least 5 chunks in the cache before starting playback
                    if (init === true || audioCache.length > 5) {
                        init = true;
                        soundController.playCache(audioCache);
                        audioCache = [];
                    }
                });

                stream.on('end', function () {
                    // console.log('end');
                });
            });
        });
    };
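
The receive path above interprets each incoming chunk directly as Float32 samples, while the sender (below) emits Int16 samples, which is a plausible source of the noise. A minimal sketch of reading a chunk back as 16-bit PCM, assuming the chunks are Buffer/Uint8Array views over Int16 data (the helper name `convertInt16ToFloat32` is hypothetical, not from the tutorial):

    // Hypothetical helper: view a received chunk's bytes as Int16
    // samples and rescale them to the [-1, 1) range that Web Audio
    // buffers expect.
    const convertInt16ToFloat32 = (chunk) => {
        const int16 = new Int16Array(chunk.buffer, chunk.byteOffset, chunk.byteLength / 2);
        const float32 = new Float32Array(int16.length);
        for (let i = 0; i < int16.length; i++) {
            float32[i] = int16[i] / 32768; // 2^15, the Int16 magnitude
        }
        return float32;
    };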


    const sendStream = (audioBuffer) => {
        if (socketIO) {
            let stream = ss.createStream();
            ss(socketIO).emit('talking', stream /*, {client: socketIO.id}*/);
            stream.write(new ss.Buffer(audioBuffer));
            // ss.createBlobReadStream(audioBuffer).pipe(stream);
            // stream.write(audioBuffer);
            // stream.end();
        }
    };
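
Note that sendStream opens a brand-new socket.io-stream for every 2048-sample chunk. A long-lived stream per broadcast session is another option; a rough sketch (`startTalking` and `sendChunk` are hypothetical names, not from the tutorial):

    // Sketch: open one stream when the broadcast starts and keep
    // writing chunks into it, instead of one stream per chunk.
    let talkStream = null;

    const startTalking = () => {
        talkStream = ss.createStream();
        ss(socketIO).emit('talking', talkStream);
    };

    const sendChunk = (audioBuffer) => {
        if (talkStream) {
            talkStream.write(new ss.Buffer(audioBuffer));
        }
    };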

    const initRecording = () => {

        AudioContext = window.AudioContext || window.webkitAudioContext;
        context = new AudioContext();
        processor = context.createScriptProcessor(bufferSize, 1, 1);
        processor.connect(context.destination);
        context.resume();

        const handleSuccess = (stream) => {
            globalStream = stream;
            input = context.createMediaStreamSource(stream);
            input.connect(processor);

            processor.onaudioprocess = function (e) {
                microphoneProcess(e);
            };
        };

        navigator.mediaDevices.getUserMedia(constraintsAudio)
            .then(handleSuccess)
            .catch((err) => console.error('getUserMedia failed:', err));
    };

    const microphoneProcess = (e) => {
        let left = e.inputBuffer.getChannelData(0);
        let left16 = convertFloat32ToInt16(left);
        // console.log(left16)
        // convertTypedArray(new Float32Array(left), Uint8Array);
        sendStream(left16);
        // socket.emit('binaryData', left16);
    };
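
`convertFloat32ToInt16` is called above but its definition isn't included in the question. A minimal sketch of the usual Float32 to Int16 PCM conversion from this kind of tutorial (the exact implementation is an assumption):

    // Assumed helper: clamp each Float32 sample to [-1, 1] and scale
    // it to the signed 16-bit range before sending it over the wire.
    const convertFloat32ToInt16 = (buffer) => {
        const int16 = new Int16Array(buffer.length);
        for (let i = 0; i < buffer.length; i++) {
            const s = Math.max(-1, Math.min(1, buffer[i]));
            int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
        }
        return int16.buffer; // ArrayBuffer of 16-bit PCM samples
    };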

0 Answers:

No answers yet.