现在我的项目是使用electron-vue写的,有一个需求是需要把摄像头画面采集上来,并且需要将每帧画面传递到Python做图像识别,现在视频已经可以采集到了,但就是怎么获取每帧的数据,各位有遇到过或解决过这种问题的,请回复下,谢谢
下面是获取摄像头代码:
/** @desc 获取计算机外设列表 储存摄像头数据 */
getDevices() {
return new Promise((resolve, reject) => {
if (
!navigator.mediaDevices ||
!navigator.mediaDevices.enumerateDevices
) {
window.alert('不支持 mediaDevices.enumerateDevices()');
}
navigator.mediaDevices
.enumerateDevices()
.then((devices) => {
this.cameraList = [];
devices.forEach((device, index) => {
if (device.kind && device.kind === 'videoinput') {
this.cameraList.push({
id: device.deviceId,
label: device.label,
});
}
});
resolve();
})
.catch((err) => {
console.log(err.name + ': ' + err.message);
reject();
});
});
},
main() {
this.cameraList.forEach((camera, index) => {
let domId = `video${index}`;
let video = document.getElementById(domId);
this.enableCamera(camera.id, video,index);
});
},
/** @desc 摄像头使能封装 */
enableCamera(deviceId, video,index) {
this.getUserMedia(
this.setConstraints(deviceId),
(stream) => {
// var videoTracks = stream.getVideoTracks();
// console.log('Using video device: ' + videoTracks[0].label);
// stream.onended = function() {
// console.log('Stream ended');
// };
mediaStreamArray.push(stream)
video.srcObject = stream;
video.onloadedmetadata = (e) => {
video.play();
};
// debugger
// var stream_video = video.captureStream() || video.mozCaptureStream();
// var videoTrack_media = stream_video .getVideoTracks()[0].clone();
// console.log(videoTrack_media)
if(index == 0){
this.bufferDataOne = stream
}else if(index == 1){
this.bufferDataTwo = stream
}else if(index == 2){
this.bufferDataThree = stream
}else if(index == 3){
this.bufferDataFour = stream
}
},
(error) => {
console.log(`访问用户媒体设备失败${error.name}, ${error.message}`);
}
);
},
获取到视频 stream(也就是 Promise 返回的那个 stream)之后,从视频流中取得视频轨,这就是截图所需的信息:videoSource: stream.getVideoTracks()[0]
this.shareStream = TRTC.createStream({ // 这个函数就是创建媒体流的函数
videoSource: stream.getVideoTracks()[0] // 获取视频轨信息
})
可以通过 canvas 元素加上 requestAnimationFrame() 取得每帧的画面
之前使用canvas画图,再把图片发送到后端,界面上的按钮会有卡顿,所以又换了其他方式。
参考ImageCapture
ImageCapture.takePhoto() - Web APIs | MDN The takePhoto() method of the ImageCapture interface takes a single exposure using the video capture device sourcing a MediaStreamTrack and returns a Promise that resolves with a Blob containing the data. https://developer.mozilla.org/en-US/docs/Web/API/ImageCapture/takePhoto
//还是使用的定时
setInterImage(){
var _this = this
console.log('开始循环定时发送图片信息')
if(setInterImagerTimer){
clearInterval(setInterImagerTimer)
setInterImagerTimer = null
}
setInterImagerTimer = setInterval(()=>{
// _this.toImageBute()
// _this.onGrabFrameButtonClick()
_this.onTakePhotoButtonClick()
},100)
},
//使用ImageCapture.takePhoto()获取到blob,再讲blob转成base64的字符串,传递到后台处理
onTakePhotoButtonClick() {
var _this = this
if(imageCapture != null){
imageCapture.takePhoto()
.then(
blob =>
{
_this.blobToBase64(blob).then(
res =>{
// console.log("base64")
ipcRenderer.send('canvasVideo', res)
})
}
)
.catch(error => console.log(error));
}
},
// 原理:利用fileReader的readAsDataURL,将blob转为base64
blobToBase64(blob) {
return new Promise((resolve, reject) => {
const fileReader = new FileReader();
fileReader.onload = (e) => {
resolve(e.target.result);
};
// readAsDataURL
fileReader.readAsDataURL(blob);
fileReader.onerror = () => {
reject(new Error('文件流异常'));
};
})
},