@adlerdevfull
Created December 28, 2021 11:33
// join the room: register each peer and open an RTC connection to it
socket.on('join room', async (data) => {
  console.log('join room', data);
  socket.emit('getCanvas');
  let conc = [];
  data.forEach((user_data) => {
    dataModelVideo[user_data.socket_id] = user_data;
    conc.push(user_data.socket_id);
  });
  // dataModelVideo = data;
  // let conc = Object.keys(dataModelVideo);
  if (conc.length) {
    conc.forEach(sid => {
      if (sid != socket.id) {
        addRTC(sid);
        console.log('addRTC join room');
      } else whoami.push(sid);
    });
  }
  await startStream();
  feather.replace();
});
// start stream
async function startStream(type = 'init') {
  if (!mystreamvideo) {
    console.log('startStream: mediaConstraints', mediaConstraints);
    try {
      mystreamvideo = await navigator.mediaDevices.getUserMedia(mediaConstraints);
      myvideo.srcObject = mystreamvideo;
    } catch (e) {
      handleGetUserMediaError(e);
      // if the webcam failed, retry with audio only
      if (mediaConstraints.video) {
        mediaConstraints.video = false;
        startStream(type);
      }
      return false;
    }
  }
  if (type == 'screen_on') {
    try {
      myscreenshare = await navigator.mediaDevices.getDisplayMedia({ cursor: true });
      myscreenshare.getVideoTracks()[0].onended = () => { startStream('screen_off'); };
    } catch (e) {
      handleGetUserMediaError(e);
      return false;
    }
    myvideo.srcObject = myscreenshare;
  }
  if (type == 'screen_off') {
    myvideo.srcObject = mystreamvideo;
    myscreenshare = null;
  }
  // when `type` is a socket id, renegotiate with that single peer only
  let is_sid = connections[type] ? true : false;
  let audio_track = mystreamvideo.getAudioTracks();
  audio_track = (audio_track && audio_track.length) ? audio_track[0] : null;
  let video_track = myscreenshare ? myscreenshare.getVideoTracks() : mystreamvideo.getVideoTracks();
  video_track = (video_track && video_track.length) ? video_track[0] : null;
  console.log(`startStream: type=${type}, is_sid=${is_sid}`, audio_track, video_track);
  // If screen sharing is starting (screen_on) and the video button is locked, unlock it;
  // if connections are starting (init) or screen sharing is ending and there is no webcam, lock it.
  if ((['screen_off', 'init'].includes(type) && (!video_track && videoAllowed)) || ((type == 'screen_on') && (video_track && !videoAllowed))) {
    $(videoButt).click();
  }
  for (const key in connections) {
    let sid = is_sid ? type : key;
    let senders = connections[sid].getSenders();
    let audio_sender = (senders && senders.length) ? senders.filter(sender => sender && sender.track && sender.track.kind == 'audio') : null;
    console.log('audio sender', audio_sender);
    if ((!audio_sender || !audio_sender.length) && audio_track) {
      // addTrack is synchronous and returns the RTCRtpSender for this track
      let audio_track_stream = connections[sid].addTrack(audio_track, mystreamvideo);
      audio_track_connections.push({
        audio_track: audio_track_stream
      });
      console.log('startStream: add audio');
      senders = connections[sid].getSenders();
    }
    let video_sender = (senders && senders.length) ? senders.filter(sender => sender && sender.track && sender.track.kind == 'video') : null;
    if (video_sender && video_sender.length) {
      if (!video_track) {
        connections[sid].removeTrack(video_sender[0]);
        console.log('startStream: remove video');
      } else if (video_track.id != video_sender[0].track.id) {
        await video_sender[0].replaceTrack(video_track);
        console.log('startStream: switch video');
      }
    } else if (video_track) {
      // await connections[sid].addTrack(video_track, myscreenshare ? myscreenshare : mystreamvideo);
      if (myscreenshare) {
        // streamless addTrack: the remote peer attaches the track to its stream manually, alongside the audio track
        connections[sid].addTrack(video_track);
        console.log('startStream: add screen video');
      } else {
        connections[sid].addTrack(video_track, mystreamvideo);
        console.log('startStream: add webcam video');
      }
    }
    if (is_sid) break;
  }
  return true;
}
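// --- usage sketch -------------------------------------------------------
// (assumption: `screenButt` is a hypothetical screen-share toggle button; it
// is not defined in this gist). startStream() is safe to re-run, so a toggle
// only needs to call it again with the matching type:
if (typeof screenButt !== 'undefined') {
  $(screenButt).on('click', () => startStream(myscreenshare ? 'screen_off' : 'screen_on'));
}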
// add rtc
function addRTC(sid) {
  console.log('RTCPeerConnection', sid);
  connections[sid] = new RTCPeerConnection(configuration);
  if (!document.getElementById(sid)) {
    createVideoBox(sid);
    feather.replace();
  }
  connections[sid].onicecandidate = function (event) {
    if (event.candidate) {
      console.log(`[ICE] candidate sent to ${sid}:`, event.candidate);
      socket.emit('new icecandidate', event.candidate, sid);
    }
  };
  connections[sid].ontrack = function (event) {
    console.log('receiving tracks from ' + sid, event.track, event.streams);
    // keep one entry per peer with its incoming audio track
    if (tracks_audio_aux_id.indexOf(sid) == -1 && event.track.kind == 'audio') {
      tracks_audio_aux_id.push(sid);
      tracks_audio.push({
        id: sid,
        track: event.track,
        stream: event.streams
      });
    }
    let video_element = document.getElementById(`video${sid}`);
    if (event.streams && event.streams[0]) {
      video_element.srcObject = event.streams[0];
    } else {
      // streamless track (screen share): attach it to the existing srcObject
      event.track.onmute = () => {
        console.log(`${sid} stopped screen sharing and has no webcam`);
        video_element.srcObject.removeTrack(event.track);
      };
      video_element.srcObject.addTrack(event.track);
    }
  };
  // connections[sid].addTransceiver("video");
  // note: 'removetrack' is a MediaStream event, not an RTCPeerConnection one,
  // so this handler may never fire as written
  connections[sid].onremovetrack = function (event) {
    console.log('track removed', sid, event);
    if (document.getElementById(sid)) {
      document.getElementById(sid).remove();
    }
  };
  connections[sid].onnegotiationneeded = async function () {
    try {
      const offer = await connections[sid].createOffer();
      await connections[sid].setLocalDescription(offer);
      socket.emit('video-offer', connections[sid].localDescription, sid);
      console.log(`[SDP] offer sent to ${sid}:`, connections[sid].localDescription);
    } catch (e) {
      reportError(e);
    }
  };
}
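// --- sketch of the receiving side of the signaling ----------------------
// (assumption: the event names mirror the emits above; the actual handlers
// are not part of this gist)
socket.on('video-offer', async (description, sid) => {
  if (!connections[sid]) addRTC(sid); // the answerer may not have a connection yet
  await connections[sid].setRemoteDescription(description);
  const answer = await connections[sid].createAnswer();
  await connections[sid].setLocalDescription(answer);
  socket.emit('video-answer', connections[sid].localDescription, sid);
});
socket.on('video-answer', async (description, sid) => {
  await connections[sid].setRemoteDescription(description);
});
socket.on('new icecandidate', async (candidate, sid) => {
  await connections[sid].addIceCandidate(new RTCIceCandidate(candidate));
});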
// start recording the stream
async function startStreamRecording() {
  recordedBlobs = [];
  let options = getSupportedMimeTypes();
  console.log('MediaRecorder options supported', options);
  options = { mimeType: options[2] }; // pick one of the supported types (here the third in the list)
  try {
    var __voice_stream = { echoCancellation: true, noiseSuppression: true, sampleRate: 44100 };
    const DISPLAY_STREAM = await navigator.mediaDevices.getDisplayMedia({ video: { cursor: "motion" }, audio: __voice_stream }); // retrieving screen media
    const VOICE_STREAM = await navigator.mediaDevices.getUserMedia({
      audio: __voice_stream, video: false }); // retrieving microphone media
    let audio_track_VOICE_STREAM = VOICE_STREAM.getAudioTracks();
    AUDIO_CONTEXT = new AudioContext();
    audio_track_VOICE_STREAM.map(t => console.log('voice track', audio_track_VOICE_STREAM, t));
    MEDIA_AUDIO = AUDIO_CONTEXT.createMediaStreamSource(DISPLAY_STREAM); // source for on-screen audio
    MIC_AUDIO = AUDIO_CONTEXT.createMediaStreamSource(VOICE_STREAM); // source for microphone audio
    const sources = audio_track_VOICE_STREAM.map(t => AUDIO_CONTEXT.createMediaStreamSource(new MediaStream([t])));
    AUDIO_MERGER = AUDIO_CONTEXT.createMediaStreamDestination(); // audio merger
    MEDIA_AUDIO.connect(AUDIO_MERGER); // feed on-screen audio into the merger
    MIC_AUDIO.connect(AUDIO_MERGER); // feed microphone audio into the merger
    sources.forEach(s => s.connect(AUDIO_MERGER));
    console.log('AUDIO_MERGER.stream.getAudioTracks', AUDIO_MERGER.stream.getAudioTracks());
    const TRACKS = [...DISPLAY_STREAM.getVideoTracks(), ...AUDIO_MERGER.stream.getAudioTracks()]; // on-screen video plus merged audio
    stream = new MediaStream(TRACKS);
    mediaRecorder = new MediaRecorder(stream, options);
    handleMediaRecorder(mediaRecorder);
    $('#streamRecording').addClass('fa-blink');
    $('#streamRecording').attr('onclick', 'stopStreamRecording()');
    $('#streamRecording>span').html('Parar Gravação'); // "Stop Recording"
  } catch (err) {
    console.error('Exception while creating MediaRecorder: ', err);
    return;
  }
}
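// --- sketch of the helpers referenced above ------------------------------
// (assumption: handleMediaRecorder and stopStreamRecording are not included
// in this gist; this is one plausible minimal implementation)
function handleMediaRecorder(recorder) {
  recorder.ondataavailable = (event) => {
    if (event.data && event.data.size > 0) recordedBlobs.push(event.data);
  };
  recorder.onstop = () => {
    // assemble the recorded chunks into a single downloadable file
    const blob = new Blob(recordedBlobs, { type: recorder.mimeType });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = 'recording.webm';
    a.click();
    URL.revokeObjectURL(url);
  };
  recorder.start(1000); // emit a data chunk every second
}

function stopStreamRecording() {
  if (mediaRecorder && mediaRecorder.state !== 'inactive') mediaRecorder.stop();
  stream.getTracks().forEach(t => t.stop());
}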
@adlerdevfull (Author)

I'm just building it this way because I thought it would be the best approach to record the audio of every user in the room.
Please disregard the "audio_track_VOICE_STREAM" snippet; I got no results with it...

Since I have a video conference room that can hold as many users as needed, my idea is to capture the audio of each participant and feed it into the AudioContext. But when I do this with DISPLAY_STREAM and/or VOICE_STREAM, I only get my own stream,

and I don't know how to get the audio of the other participants in the room. I tried to use
const DISPLAY_STREAM = await navigator.mediaDevices.getDisplayMedia({video: {cursor: "motion"}, audio: __voice_stream}); // retrieving screen media
const VOICE_STREAM = await navigator.mediaDevices.getUserMedia({ audio: __voice_stream, video: false }); // retrieving microphone media
to capture everyone's audio and put it into the AudioContext, but I couldn't make that work.
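One way to get the other participants' audio into the mix, without another getUserMedia/getDisplayMedia call (those only ever capture local media), is to reuse the remote tracks that the ontrack handler in addRTC already collects into tracks_audio. A minimal sketch, assuming tracks_audio is populated as in the gist above:

AUDIO_MERGER = AUDIO_CONTEXT.createMediaStreamDestination();
tracks_audio.forEach(({ track }) => {
  // wrap each remote audio track in its own MediaStream so it can feed a source node
  const source = AUDIO_CONTEXT.createMediaStreamSource(new MediaStream([track]));
  source.connect(AUDIO_MERGER);
});
// AUDIO_MERGER.stream now carries local + remote audio and can be handed to MediaRecorder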

@guest271314

Is the code in the gist run on each user's machine?

@adlerdevfull (Author)

Yep... here I've only included the part I use to connect via RTCPeerConnection and create the stream.

@guest271314

If I understand the use case correctly, users can send their video and audio to a centralized server, which can then re-transmit the merged data to all peers.

@adlerdevfull (Author)

that's right
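For reference, the client emits in the gist ('join room', 'video-offer', 'new icecandidate') imply that the server mainly relays signaling between peers. A minimal sketch of such a relay, assuming a bare socket.io server; the real server, including the hypothetical getUsersInRoom helper, is not part of this gist:

const io = require('socket.io')(3000); // assumption: a bare socket.io server
io.on('connection', (socket) => {
  socket.on('join room', (room) => {
    socket.join(room);
    io.to(room).emit('join room', getUsersInRoom(room)); // getUsersInRoom is hypothetical
  });
  // forward SDP and ICE messages to the addressed peer
  socket.on('video-offer', (description, sid) => io.to(sid).emit('video-offer', description, socket.id));
  socket.on('video-answer', (description, sid) => io.to(sid).emit('video-answer', description, socket.id));
  socket.on('new icecandidate', (candidate, sid) => io.to(sid).emit('new icecandidate', candidate, socket.id));
});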

@guest271314

I still do not gather where the specific issue is.

@adlerdevfull (Author)

In your analysis, will the code as written record the audio of all participants into one track and save the screen recording?

@adlerdevfull (Author)

In this link,
https://github.com/rohitbharti279/Videocall_socket-node
there's a clear example of what I'm trying to say. It's easy to install and run; once everything is up, try recording with screen share while talking on both sides of the browser. It will only record one side; the other is muted or something.

@guest271314

Are you sure you are recording the remote stream, and not the local stream?

@adlerdevfull (Author)

I asked myself this question: how can I be sure whether I'm recording the local or the remote stream?
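One way to check is to compare the tracks in the recorded stream against the local capture streams. A sketch reusing the globals from the gist (mystreamvideo, myscreenshare, stream):

function isLocalTrack(track) {
  // tracks captured locally live in mystreamvideo / myscreenshare
  const local = [
    ...mystreamvideo.getTracks(),
    ...(myscreenshare ? myscreenshare.getTracks() : [])
  ];
  return local.some(t => t.id === track.id);
}

// log what the recorded stream actually contains
stream.getTracks().forEach(t =>
  console.log(t.kind, t.id, isLocalTrack(t) ? 'LOCAL' : 'REMOTE'));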
