WebRTC
What is WebRTC
An open-source library for audio/video processing plus real-time communication
What can WebRTC do
- Real-time audio/video interaction
- Games, instant messaging, file transfer, and more
- Transport and audio/video processing (echo cancellation, noise suppression, etc.)
WebRTC architecture
WebRTC source tree layout
api  The WebRTC API layer; browsers call into WebRTC through these interfaces
call  Data-flow management; a Call represents all data flowing in and out of one endpoint
video  Video-related logic
audio  Audio-related logic
common_audio  Audio algorithms
common_video  Video algorithms
media  Multimedia-related logic, e.g. codec handling
logging  Logging
module  Important directory holding the sub-modules
pc  PeerConnection; connection-related logic
p2p  Peer-to-peer code (STUN, TURN)
rtc_base  Base code, e.g. unified interfaces for threads and locks
rtc_tool  Tools for audio/video analysis
tool_webrtc  WebRTC testing tools, e.g. the network simulator
system_wrappers  OS-specific code, e.g. CPU features, atomic operations
stats  Classes for various statistics
sdk  Android and iOS layer code, e.g. video capture and rendering
- WebRTC Modules directory
audio_coding  Audio encoding/decoding code
audio_device  Audio capture and playback code
audio_mixer  Audio mixing code
audio_processing  Audio pre/post-processing code
bitrate_controller  Bitrate control code
congestion_controller  Congestion control code
desktop_capture  Desktop capture code
pacing  Bitrate probing and pacing (smoothing) code
remote_bitrate_estimator  Remote bitrate estimation code
rtp_rtcp  RTP/RTCP protocol code
video_capture  Video capture code
video_processing  Video pre/post-processing code
Tracks and streams (Track, MediaStream)
Key WebRTC classes
MediaStream
RTCPeerConnection
RTCDataChannel
PeerConnection call flow (a minimal sketch of how these classes fit together follows below)
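Since the outline only names these classes, here is a minimal, hypothetical sketch of how a MediaStream, two RTCPeerConnection objects and an RTCDataChannel work together. Both peers live in the same page, signaling is skipped, and remoteVideo is assumed to be a <video> element in that page:
// Minimal local-only sketch (no signaling server): pc1 sends, pc2 receives.
const pc1 = new RTCPeerConnection();
const pc2 = new RTCPeerConnection();
// Exchange ICE candidates directly; a real app relays them over a signaling channel.
pc1.onicecandidate = e => e.candidate && pc2.addIceCandidate(e.candidate);
pc2.onicecandidate = e => e.candidate && pc1.addIceCandidate(e.candidate);
// Remote media arrives as tracks on pc2.
pc2.ontrack = e => { remoteVideo.srcObject = e.streams[0]; };
// The RTCDataChannel carries arbitrary application data.
const dc = pc1.createDataChannel('chat');
dc.onopen = () => dc.send('hello');
pc2.ondatachannel = e => { e.channel.onmessage = m => console.log(m.data); };
navigator.mediaDevices.getUserMedia({ video: true, audio: true })
  .then(async stream => {
    // Add every track of the local MediaStream to the sending connection.
    stream.getTracks().forEach(track => pc1.addTrack(track, stream));
    // Offer/answer exchange (normally carried by a signaling server).
    await pc1.setLocalDescription(await pc1.createOffer());
    await pc2.setRemoteDescription(pc1.localDescription);
    await pc2.setLocalDescription(await pc2.createAnswer());
    await pc1.setRemoteDescription(pc2.localDescription);
  });
In a real application the call flow is the same; the only difference is that the offer, answer and ICE candidates travel through a signaling server between two browsers.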
API reference
- Enumerating audio/video devices (enumerateDevices)
var ePromise = navigator.mediaDevices.enumerateDevices();
- The returned list contains MediaDeviceInfo objects (see the sketch after this list)
- deviceId  the device ID
- label  the device name
- kind  the device kind
- groupId  two devices with the same groupId belong to the same physical device
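A short sketch of reading these fields from the returned list (plain console logging, no UI):
navigator.mediaDevices.enumerateDevices().then(deviceInfos => {
  deviceInfos.forEach(d => {
    // kind is 'audioinput', 'audiooutput' or 'videoinput';
    // label stays empty until the user has granted media permission
    console.log(d.kind, d.label, d.deviceId, d.groupId);
  });
});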
- Capturing audio and video (getUserMedia)
let promise=navigator.mediaDevices.getUserMedia(constraints);
MediaStreamConstraints
dictionary MediaStreamConstraints {
  (boolean or MediaTrackConstraints) video = false;
  (boolean or MediaTrackConstraints) audio = false;
};
Demo
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<body>
<video id="player" autoplay playsinline></video>
</body>
</html>
<script>
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
console.log('navigator.mediaDevices is not supported');
} else {
// capture video and audio
const constraints = {
video: true,
audio: true
}
navigator.mediaDevices.getUserMedia(constraints)
.then(gotMediaStream)
.catch(handleError);
}
let videoplay = document.querySelector('video#player');
function gotMediaStream(stream) {
// stream is the captured media stream
videoplay.srcObject = stream;
}
function handleError(err) {
console.log(err);
}
</script>
Different implementations of getUserMedia
- getUserMedia (W3C)
- webkitGetUserMedia (Chrome)
- mozGetUserMedia (Firefox)
Rolling your own shim
var getUserMedia=navigator.getUserMedia||
navigator.webkitGetUserMedia||
navigator.mozGetUserMedia;
- Or use Google's open-source adapter library, adapter.js
- Demo
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<body>
<div>
<label for="">Audio input</label>
<select id="audioSource"></select>
</div>
<div>
<label for="">Audio output</label>
<select id="audioOutput"></select>
</div>
<div>
<label for="">Video input</label>
<select id="videoSource"></select>
</div>
<video id="player" autoplay playsinline></video>
</body>
</html>
<script>
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
console.log('navigator.mediaDevices is not supported');
} else {
// capture video and audio
const constraints = {
video: true,
audio: false
}
navigator.mediaDevices.getUserMedia(constraints)
.then(gotMediaStream)
.then(gotDevice)
.catch(handleError);
}
let videoplay = document.querySelector('video#player');
function gotMediaStream(stream) {
// stream is the captured media stream
videoplay.srcObject = stream;
// having a stream means the user granted access to the devices; enumerateDevices returns a promise
return navigator.mediaDevices.enumerateDevices();
}
function handleError(err) {
console.log(err);
}
let audioSource = document.querySelector("select#audioSource");
let audioOutput = document.querySelector("select#audioOutput");
let videoSource = document.querySelector("select#videoSource");
function gotDevice(deviceInfos) {
deviceInfos.forEach(deviceInfo => {
const { kind } = deviceInfo;
let option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label;
if (kind === 'audioinput') {
audioSource.appendChild(option);
} else if (kind === 'audiooutput') {
audioOutput.appendChild(option);
} else if (kind === 'videoinput') {
videoSource.appendChild(option);
}
});
}
</script>
- Video constraints
width: width
height: height
aspectRatio: aspect ratio
frameRate: frame rate (the higher the frame rate, the smoother the picture)
facingMode:
user: front camera
environment: rear camera
left: camera facing the user's left
right: camera facing the user's right
resizeMode: whether the picture may be cropped/scaled
- Audio constraints
volume: volume (0-1.0)
sampleRate: sample rate
sampleSize: sample size (bits per sample)
echoCancellation: true/false, enable or disable echo cancellation
autoGainControl: true/false, automatic gain control (boosts the volume, within limits)
noiseSuppression: true/false, enable noise suppression
latency: latency (the lower, the better the real-time feel, though a poor network may then cause stutter); under 500 ms is already good quality, ideally under 200 ms
channelCount: mono or stereo; mono is usually enough, but e.g. a live music lesson is better in stereo
deviceId: lets you switch devices when several are present, e.g. switching cameras
groupId: group identifier; tracks from the same physical device share it
If you give min/max values, the browser automatically picks the best settings it can within that range, as in the example below
{
audio: true,
video: {
width: {
min: 300,
max: 640
},
height: {
min: 300,
max: 480
},
frameRate:{
min:15,
max:30
}
}
}
- Demo
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<body>
<div>
<label for="">Audio input</label>
<select id="audioSource"></select>
</div>
<div>
<label for="">Audio output</label>
<select id="audioOutput"></select>
</div>
<div>
<label for="">Video input</label>
<select id="videoSource"></select>
</div>
<video id="player" autoplay playsinline></video>
</body>
</html>
<script>
function start() {
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
console.log('navigator.mediaDevices is not supported');
return;
} else {
const deviceId = videoSource.value;
// capture video and audio
const constraints = {
video: {
width: 320,
height: 240,
frameRate: 10,
facingMode: 'environment',
deviceId: deviceId ? deviceId : undefined
},
audio: {
noiseSuppression: true,
echoCancellation: true
}
}
navigator.mediaDevices.getUserMedia(constraints)
.then(gotMediaStream)
.then(gotDevice)
.catch(handleError);
}
}
let videoplay = document.querySelector('video#player');
function gotMediaStream(stream) {
// stream is the captured media stream
videoplay.srcObject = stream;
// having a stream means the user granted access to the devices; enumerateDevices returns a promise
return navigator.mediaDevices.enumerateDevices();
}
function handleError(err) {
console.log(err);
}
let audioSource = document.querySelector("select#audioSource");
let audioOutput = document.querySelector("select#audioOutput");
let videoSource = document.querySelector("select#videoSource");
function gotDevice(deviceInfos) {
// clear first to avoid duplicate entries
audioSource.innerHTML = "";
audioOutput.innerHTML = "";
videoSource.innerHTML = "";
deviceInfos.forEach(deviceInfo => {
const { kind } = deviceInfo;
let option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label;
if (kind === 'audioinput') {
audioSource.appendChild(option);
} else if (kind === 'audiooutput') {
audioOutput.appendChild(option);
} else if (kind === 'videoinput') {
videoSource.appendChild(option);
}
});
}
start();
// selecting a different video input re-runs start
videoSource.onchange = start;
</script>
Browser video effects
- CSS filters: -webkit-filter / filter
- How to bind a filter to the video element
- OpenGL/Metal, etc.: even when using CSS filters, the actual rendering is done by these graphics libraries underneath
Supported effects
grayscale  grayscale
opacity  transparency
sepia  sepia tone
brightness  brightness
saturate  saturation
contrast  contrast
hue-rotate  hue rotation
blur  blur
invert  color inversion
drop-shadow  drop shadow
- Demo
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<style>
.none{
-webkit-filter: none;
}
.blur{
-webkit-filter: blur(3px);
}
.grayscale{
-webkit-filter: grayscale(1);
}
.invert{
-webkit-filter: invert(1);
}
.sepia{
-webkit-filter: sepia(1);
}
</style>
<body>
<div>
<label for="">Audio input</label>
<select id="audioSource"></select>
</div>
<div>
<label for="">Audio output</label>
<select id="audioOutput"></select>
</div>
<div>
<label for="">Video input</label>
<select id="videoSource"></select>
</div>
<div>
<label for="">Filter</label>
<select id="filter">
<option value="none">None</option>
<option value="blur">Blur</option>
<option value="grayscale">Grayscale</option>
<option value="invert">Invert</option>
<option value="sepia">Sepia</option>
</select>
</div>
<video id="player" autoplay playsinline></video>
</body>
</html>
<script>
function start() {
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
console.log('navigator.mediaDevices is not supported');
return;
} else {
const deviceId = videoSource.value;
// capture video and audio
const constraints = {
video: {
width: 320,
height: 240,
frameRate: 10,
facingMode: 'environment',
deviceId: deviceId ? deviceId : undefined
},
audio: {
noiseSuppression: true,
echoCancellation: true
}
}
navigator.mediaDevices.getUserMedia(constraints)
.then(gotMediaStream)
.then(gotDevice)
.catch(handleError);
}
}
let videoplay = document.querySelector('video#player');
function gotMediaStream(stream) {
// stream is the captured media stream
videoplay.srcObject = stream;
// having a stream means the user granted access to the devices; enumerateDevices returns a promise
return navigator.mediaDevices.enumerateDevices();
}
function handleError(err) {
console.log(err);
}
let audioSource = document.querySelector("select#audioSource");
let audioOutput = document.querySelector("select#audioOutput");
let videoSource = document.querySelector("select#videoSource");
let filterSelect = document.querySelector("select#filter");
function gotDevice(deviceInfos) {
deviceInfos.forEach(deviceInfo => {
const { kind } = deviceInfo;
let option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label;
if (kind === 'audioinput') {
audioSource.appendChild(option);
} else if (kind === 'audiooutput') {
audioOutput.appendChild(option);
} else if (kind === 'videoinput') {
videoSource.appendChild(option);
}
});
}
start();
// selecting a different video input re-runs start
videoSource.onchange = start;
filterSelect.onchange=function(){
videoplay.className=filterSelect.value;
}
</script>
Capturing a still image from the video
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<style>
.none{
-webkit-filter: none;
}
.blur{
-webkit-filter: blur(3px);
}
.grayscale{
-webkit-filter: grayscale(1);
}
.invert{
-webkit-filter: invert(1);
}
.sepia{
-webkit-filter: sepia(1);
}
</style>
<body>
<div>
<label for="">Audio input</label>
<select id="audioSource"></select>
</div>
<div>
<label for="">Audio output</label>
<select id="audioOutput"></select>
</div>
<div>
<label for="">Video input</label>
<select id="videoSource"></select>
</div>
<div>
<label for="">Filter</label>
<select id="filter">
<option value="none">None</option>
<option value="blur">Blur</option>
<option value="grayscale">Grayscale</option>
<option value="invert">Invert</option>
<option value="sepia">Sepia</option>
</select>
</div>
<div>
<button id="snapshot">Take snapshot</button>
</div>
<div>
<canvas id="picture"></canvas>
</div>
<video id="player" autoplay playsinline></video>
</body>
</html>
<script>
function start() {
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
console.log('navigator.mediaDevices is not supported');
return;
} else {
const deviceId = videoSource.value;
// capture video and audio
const constraints = {
video: {
width: 320,
height: 240,
frameRate: 10,
facingMode: 'environment',
deviceId: deviceId ? deviceId : undefined
},
audio: {
noiseSuppression: true,
echoCancellation: true
}
}
navigator.mediaDevices.getUserMedia(constraints)
.then(gotMediaStream)
.then(gotDevice)
.catch(handleError);
}
}
let videoplay = document.querySelector('video#player');
let snapshot=document.querySelector('button#snapshot');
let picture=document.querySelector('canvas#picture');
picture.width=320;
picture.height=240;
function gotMediaStream(stream) {
// stream is the captured media stream
videoplay.srcObject = stream;
// having a stream means the user granted access to the devices; enumerateDevices returns a promise
return navigator.mediaDevices.enumerateDevices();
}
function handleError(err) {
console.log(err);
}
let audioSource = document.querySelector("select#audioSource");
let audioOutput = document.querySelector("select#audioOutput");
let videoSource = document.querySelector("select#videoSource");
let filterSelect = document.querySelector("select#filter");
function gotDevice(deviceInfos) {
deviceInfos.forEach(deviceInfo => {
const { kind } = deviceInfo;
let option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label;
if (kind === 'audioinput') {
audioSource.appendChild(option);
} else if (kind === 'audiooutput') {
audioOutput.appendChild(option);
} else if (kind === 'videoinput') {
videoSource.appendChild(option);
}
});
}
start();
// selecting a different video input re-runs start
videoSource.onchange = start;
// filter selection handler
filterSelect.onchange=function(){
videoplay.className=filterSelect.value;
}
// snapshot handler
snapshot.onclick=function(){
// optional: only needed if the snapshot itself should keep the filter
// picture.className=filterSelect.value;
picture.getContext('2d').drawImage(videoplay,0,0,picture.width,picture.height);
}
</script>
Hearing the captured audio in real time
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<body>
<div>
<label for="">Audio input</label>
<select id="audioSource"></select>
</div>
<div>
<label for="">Audio output</label>
<select id="audioOutput"></select>
</div>
<div>
<label for="">Video input</label>
<select id="videoSource"></select>
</div>
<!-- controls shows the play/pause UI -->
<audio id="audioplayer" controls autoplay></audio>
</body>
</html>
<script>
function start() {
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
console.log('navigator.mediaDevices is not supported');
return;
} else {
const deviceId = videoSource.value;
// capture audio only
const constraints = {
audio:true
}
navigator.mediaDevices.getUserMedia(constraints)
.then(gotMediaStream)
.then(gotDevice)
.catch(handleError);
}
}
let audioplayer = document.querySelector('audio#audioplayer');
function gotMediaStream(stream) {
audioplayer.srcObject = stream;
return navigator.mediaDevices.enumerateDevices();
}
function handleError(err) {
console.log(err);
}
let audioSource = document.querySelector("select#audioSource");
let audioOutput = document.querySelector("select#audioOutput");
let videoSource = document.querySelector("select#videoSource");
let filterSelect = document.querySelector("select#filter");
function gotDevice(deviceInfos) {
deviceInfos.forEach(deviceInfo => {
const { kind } = deviceInfo;
let option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label;
if (kind === 'audioinput') {
audioSource.appendChild(option);
} else if (kind === 'audiooutput') {
audioOutput.appendChild(option);
} else if (kind === 'videoinput') {
videoSource.appendChild(option);
}
});
}
start();
// selecting a different video input re-runs start
videoSource.onchange = start;
</script>
MediaStream API
WebRTC has the notions of streams and tracks: one stream can hold many tracks (audio media tracks, video media tracks); a short sketch follows the lists below.
Functions
- MediaStream.addTrack()  add a track
- MediaStream.removeTrack()  remove a track
- MediaStream.getVideoTracks()  get all video tracks
- MediaStream.getAudioTracks()  get all audio tracks
Events
- MediaStream.onaddtrack  fired when a track is added
- MediaStream.onremovetrack  fired when a track is removed
- MediaStream.onended  fired when the stream ends
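A minimal sketch of working with tracks on a captured stream (assuming getUserMedia succeeds):
navigator.mediaDevices.getUserMedia({ video: true, audio: true }).then(stream => {
  console.log(stream.getVideoTracks().length, 'video track(s)');
  console.log(stream.getAudioTracks().length, 'audio track(s)');
  // Build a second stream that carries only the video track.
  const videoOnly = new MediaStream();
  videoOnly.addTrack(stream.getVideoTracks()[0]);
  // Note: onaddtrack/onremovetrack fire for tracks the browser adds or removes
  // (e.g. on a remote stream), not for script calls to addTrack/removeTrack.
  videoOnly.onaddtrack = e => console.log('track added:', e.track.kind);
  videoOnly.onremovetrack = e => console.log('track removed:', e.track.kind);
  // Later: drop the track again.
  videoOnly.removeTrack(videoOnly.getVideoTracks()[0]);
});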
Getting the video constraint information from the stream
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<body>
<div>
<label for="">Audio input</label>
<select id="audioSource"></select>
</div>
<div>
<label for="">Audio output</label>
<select id="audioOutput"></select>
</div>
<div>
<label for="">Video input</label>
<select id="videoSource"></select>
</div>
<div>
<label for="">Filter</label>
<select id="filter">
<option value="none">None</option>
<option value="blur">Blur</option>
<option value="grayscale">Grayscale</option>
<option value="invert">Invert</option>
<option value="sepia">Sepia</option>
</select>
</div>
<div>
<canvas id="picture"></canvas>
</div>
<table>
<tr>
<td>
<video id="player" autoplay playsinline></video>
</td>
<td>
<div id="constraints"></div>
</td>
</tr>
</table>
</body>
</html>
<script>
function start() {
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
console.log('navigator.mediaDevices is not supported');
return;
} else {
const deviceId = videoSource.value;
// capture video and audio
const constraints = {
video: {
width: 320,
height: 240,
frameRate: 10,
facingMode: 'environment',
deviceId: deviceId ? deviceId : undefined
},
audio: {
noiseSuppression: true,
echoCancellation: true
}
}
navigator.mediaDevices.getUserMedia(constraints)
.then(gotMediaStream)
.then(gotDevice)
.catch(handleError);
}
}
let videoplay = document.querySelector('video#player');
let divConstraints = document.querySelector('div#constraints');
function gotMediaStream(stream) {
videoplay.srcObject = stream;
let videoTrack=stream.getVideoTracks()[0];
let videoConstraints =videoTrack.getSettings();
divConstraints.textContent= JSON.stringify(videoConstraints,null,2);
return navigator.mediaDevices.enumerateDevices();
}
function handleError(err) {
console.log(err);
}
let audioSource = document.querySelector("select#audioSource");
let audioOutput = document.querySelector("select#audioOutput");
let videoSource = document.querySelector("select#videoSource");
function gotDevice(deviceInfos) {
deviceInfos.forEach(deviceInfo => {
const { kind } = deviceInfo;
let option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label;
if (kind === 'audioinput') {
audioSource.appendChild(option);
} else if (kind === 'audiooutput') {
audioOutput.appendChild(option);
} else if (kind === 'videoinput') {
videoSource.appendChild(option);
}
});
}
start();
// selecting a different video input re-runs start
videoSource.onchange = start;
</script>
Recording
- MediaRecorder
let mediaRecorder=new MediaRecorder(stream,[options]);
Parameters:
stream  the media stream; can come from getUserMedia or from a <video>, <audio> or <canvas> element
options  recording options
Option details:
mimeType
video/webm
audio/webm
video/webm;codecs=vp8
video/webm;codecs=h264
audio/webm;codecs=opus
and so on; the container (webm here) can for instance be replaced by mp3/mp4, and the codecs that may follow differ per container
audioBitsPerSecond  audio bitrate
videoBitsPerSecond  video bitrate
bitsPerSecond  overall bitrate
- API (a minimal sketch follows this list)
MediaRecorder.start(timeslice)
Starts recording; timeslice is optional, and if given the data is stored in time slices of that length
MediaRecorder.stop()
Stops recording; this fires a dataavailable event carrying the final Blob data
MediaRecorder.pause()
Pauses recording
MediaRecorder.resume()
Resumes recording
MediaRecorder.isTypeSupported()
Checks whether a given container/MIME type (mp4, etc.) is supported for recording
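A minimal sketch of this API, assuming stream is a MediaStream that was already captured (e.g. with getUserMedia):
// Pick a supported MIME type before constructing the recorder.
const mimeType = MediaRecorder.isTypeSupported('video/webm;codecs=vp8')
  ? 'video/webm;codecs=vp8'
  : 'video/webm';
const recorder = new MediaRecorder(stream, { mimeType });
const chunks = [];
recorder.ondataavailable = e => { if (e.data && e.data.size > 0) chunks.push(e.data); };
recorder.start(1000); // emit a data chunk every second
recorder.pause();     // temporarily stop capturing
recorder.resume();    // continue recording
recorder.stop();      // fires a final dataavailable event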
- Events
MediaRecorder.ondataavailable
Fired periodically whenever a slice of data has been recorded (or once for the whole recording if no timeslice was given); the handler receives an event whose data property holds the recorded chunk
MediaRecorder.onerror
Fired when an error occurs; recording stops automatically
- Ways JavaScript can store binary data (see the sketch after this list)
String
Blob
ArrayBuffer
ArrayBufferView
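A small sketch of moving recorded data between these representations; chunks is assumed to be the array filled in ondataavailable:
// Combine the recorded chunks into a single Blob.
const blob = new Blob(chunks, { type: 'video/webm' });
// Blob -> ArrayBuffer (raw bytes).
blob.arrayBuffer().then(buffer => {
  // ArrayBuffer -> ArrayBufferView (a typed view over the same bytes).
  const view = new Uint8Array(buffer);
  console.log('recorded', view.byteLength, 'bytes');
});
// A Blob can also be read as a string (text or data URL) via FileReader.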
- Demo: record, play back and download video
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<body>
<div>
<label for="">Audio input</label>
<select id="audioSource"></select>
</div>
<div>
<label for="">Audio output</label>
<select id="audioOutput"></select>
</div>
<div>
<label for="">Video input</label>
<select id="videoSource"></select>
</div>
<video autoplay playsinline id="player"></video>
<video playsinline id="recplayer"></video>
<button id="record">Record</button>
<button id="recplay" disabled>Play</button>
<button id="download" disabled>Download</button>
</body>
</html>
<script>
function start() {
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
console.log('navigator.mediaDevices is not supported');
return;
} else {
const deviceId = videoSource.value;
// capture video and audio
const constraints = {
video: {
width: 320,
height: 240,
frameRate: 10,
facingMode: 'environment',
deviceId: deviceId ? deviceId : undefined
},
audio: {
noiseSuppression: true,
echoCancellation: true
}
}
navigator.mediaDevices.getUserMedia(constraints)
.then(gotMediaStream)
.then(gotDevice)
.catch(handleError);
}
}
let videoplay = document.querySelector('video#player');
let recplayer = document.querySelector('video#recplayer');
let btnRecord = document.querySelector('button#record');
let btnRecplay = document.querySelector('button#recplay');
let btnDownload = document.querySelector('button#download');
function gotMediaStream(stream) {
videoplay.srcObject = stream;
window.stream = stream;
return navigator.mediaDevices.enumerateDevices();
}
function handleError(err) {
console.log(err);
}
let audioSource = document.querySelector("select#audioSource");
let audioOutput = document.querySelector("select#audioOutput");
let videoSource = document.querySelector("select#videoSource");
let buffer;
let mediaRecorder;
function gotDevice(deviceInfos) {
deviceInfos.forEach(deviceInfo => {
const { kind } = deviceInfo;
let option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label;
if (kind === 'audioinput') {
audioSource.appendChild(option);
} else if (kind === 'audiooutput') {
audioOutput.appendChild(option);
} else if (kind === 'videoinput') {
videoSource.appendChild(option);
}
});
}
start();
// selecting a different video input re-runs start
videoSource.onchange = start;
btnRecord.onclick = function () {
if (btnRecord.textContent === 'Record') {
startRecord();
btnRecord.textContent = "Stop recording";
btnRecplay.disabled = true;
btnDownload.disabled = true;
} else {
stopRecord();
btnRecord.textContent = "Record";
btnRecplay.disabled = false;
btnDownload.disabled = false;
}
}
// play back the recorded video
btnRecplay.onclick = function () {
let blob = new Blob(buffer, { type: 'video/webm' });
recplayer.src = window.URL.createObjectURL(blob);
recplayer.srcObject = null;
recplayer.controls = true;
recplayer.play();
}
// download the recorded video
btnDownload.onclick = function () {
let blob = new Blob(buffer, { type: 'video/webm' });
const url = window.URL.createObjectURL(blob);
let a = document.createElement('a');
a.href = url;
a.style.display='none';// keep the link hidden; the download is triggered programmatically
a.download='aaa.webm';// file name for the download; the saved file can be opened and played in a browser
a.click();// trigger the download
}
function handleonDataAvailable(e) {
if (e && e.data && e.data.size > 0) {
buffer.push(e.data);
}
}
// start recording
function startRecord() {
buffer = [];
const options = {
mimeType: 'video/webm;codecs=vp8'
}
if (!MediaRecorder.isTypeSupported(options.mimeType)) {
console.error(`${options.mimeType} is not supported`);
}
try {
mediaRecorder = new MediaRecorder(window.stream, options);
} catch (error) {
console.log(error);
return
}
mediaRecorder.ondataavailable = handleonDataAvailable;
mediaRecorder.start(10);
}
// stop recording
function stopRecord() {
mediaRecorder.stop();
}
</script>
Recording the desktop
Requires enabling Chrome's experimental features flag
- getDisplayMedia
let promise=navigator.mediaDevices.getDisplayMedia(constraints);
- constraints is optional
The constraints take the same form as in getUserMedia (see the sketch below)
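A minimal sketch, assuming the same video#player element as in the demos; unlike getUserMedia, the user picks a screen, window or tab in a browser dialog, so deviceId and facingMode have no effect here:
navigator.mediaDevices.getDisplayMedia({ video: { frameRate: 10 }, audio: false })
  .then(stream => {
    // Show the shared screen in the page, just like a camera stream.
    document.querySelector('video#player').srcObject = stream;
  })
  .catch(err => console.log(err));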
- Demo
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Capture audio and video</title>
</head>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<body>
<div>
<label for="">Audio input</label>
<select id="audioSource"></select>
</div>
<div>
<label for="">Audio output</label>
<select id="audioOutput"></select>
</div>
<div>
<label for="">Video input</label>
<select id="videoSource"></select>
</div>
<video autoplay playsinline id="player"></video>
<video playsinline id="recplayer"></video>
<button id="record">Record</button>
<button id="recplay" disabled>Play</button>
<button id="download" disabled>Download</button>
</body>
</html>
<script>
function start() {
if (!navigator.mediaDevices || !navigator.mediaDevices.getDisplayMedia) {
console.log('navigator.mediaDevices.getDisplayMedia is not supported');
return;
} else {
const deviceId = videoSource.value;
// capture the desktop
const constraints = {
video: {
frameRate: 10,
facingMode: 'environment',
deviceId: deviceId ? deviceId : undefined
},
audio: {
noiseSuppression: true,
echoCancellation: true
}
}
navigator.mediaDevices.getDisplayMedia(constraints)
.then(gotMediaStream)
.then(gotDevice)
.catch(handleError);
}
}
let videoplay = document.querySelector('video#player');
let recplayer = document.querySelector('video#recplayer');
let btnRecord = document.querySelector('button#record');
let btnRecplay = document.querySelector('button#recplay');
let btnDownload = document.querySelector('button#download');
function gotMediaStream(stream) {
videoplay.srcObject = stream;
window.stream = stream;
return navigator.mediaDevices.enumerateDevices();
}
function handleError(err) {
console.log(err);
}
let audioSource = document.querySelector("select#audioSource");
let audioOutput = document.querySelector("select#audioOutput");
let videoSource = document.querySelector("select#videoSource");
let buffer;
let mediaRecorder;
function gotDevice(deviceInfos) {
deviceInfos.forEach(deviceInfo => {
const { kind } = deviceInfo;
let option = document.createElement('option');
option.value = deviceInfo.deviceId;
option.text = deviceInfo.label;
if (kind === 'audioinput') {
audioSource.appendChild(option);
} else if (kind === 'audiooutput') {
audioOutput.appendChild(option);
} else if (kind === 'videoinput') {
videoSource.appendChild(option);
}
});
}
start();
// selecting a different video input re-runs start
videoSource.onchange = start;
btnRecord.onclick = function () {
if (btnRecord.textContent === 'Record') {
startRecord();
btnRecord.textContent = "Stop recording";
btnRecplay.disabled = true;
btnDownload.disabled = true;
} else {
stopRecord();
btnRecord.textContent = "Record";
btnRecplay.disabled = false;
btnDownload.disabled = false;
}
}
// play back the recorded video
btnRecplay.onclick = function () {
let blob = new Blob(buffer, { type: 'video/webm' });
recplayer.src = window.URL.createObjectURL(blob);
recplayer.srcObject = null;
recplayer.controls = true;
recplayer.play();
}
// download the recorded video
btnDownload.onclick = function () {
let blob = new Blob(buffer, { type: 'video/webm' });
const url = window.URL.createObjectURL(blob);
let a = document.createElement('a');
a.href = url;
a.style.display='none';// keep the link hidden; the download is triggered programmatically
a.download='aaa.webm';// file name for the download; the saved file can be opened and played in a browser
a.click();// trigger the download
}
function handleonDataAvailable(e) {
if (e && e.data && e.data.size > 0) {
buffer.push(e.data);
}
}
// start recording
function startRecord() {
buffer = [];
const options = {
mimeType: 'video/webm;codecs=vp8'
}
if (!MediaRecorder.isTypeSupported(options.mimeType)) {
console.error(`${options.mimeType} is not supported`);
}
try {
mediaRecorder = new MediaRecorder(window.stream, options);
} catch (error) {
console.log(error);
return
}
mediaRecorder.ondataavailable = handleonDataAvailable;
mediaRecorder.start(10);
}
// stop recording
function stopRecord() {
mediaRecorder.stop();
}
</script>