我在使用recorder录音的同时,不知道为啥会播放,该怎样关掉哎
// Normalize vendor-prefixed APIs so the rest of the file can use the
// unprefixed names (legacy pattern; modern code would use
// navigator.mediaDevices.getUserMedia instead).
window.URL = window.URL || window.webkitURL
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia
// Records microphone audio from `stream` via the Web Audio API and exposes
// start/stop/getBlob/upload. `config.sampleBits` (8 or 16, default 16) and
// `config.sampleRate` (default 16000) control the encoded WAV output.
let HZRecorder = function (stream, config) {
  config = config || {}
  config.sampleBits = config.sampleBits || 16 // output sample size: 8 or 16 bits
  config.sampleRate = config.sampleRate || 16000 // output sample rate in Hz

  let context = new (window.webkitAudioContext || window.AudioContext)()
  let audioInput = context.createMediaStreamSource(stream)
  // createScriptProcessor, falling back to the older createJavaScriptNode.
  let createScript = context.createScriptProcessor || context.createJavaScriptNode
  let recorder = createScript.apply(context, [4096, 1, 1])

  let audioData = {
    size: 0, // total number of captured samples
    buffer: [], // captured Float32Array chunks
    inputSampleRate: context.sampleRate, // capture rate (device dependent)
    inputSampleBits: 16, // capture sample size
    outputSampleRate: config.sampleRate, // target rate for the WAV file
    oututSampleBits: config.sampleBits, // target sample size (original property spelling kept)
    // Append one chunk of captured samples.
    input: function (data) {
      this.buffer.push(new Float32Array(data))
      this.size += data.length
    },
    // Merge all chunks, then down-sample by simple decimation
    // (keep every Nth sample, N = inputRate / outputRate).
    compress: function () {
      let data = new Float32Array(this.size)
      let offset = 0
      for (let i = 0; i < this.buffer.length; i++) {
        data.set(this.buffer[i], offset)
        offset += this.buffer[i].length
      }
      let compression = parseInt(this.inputSampleRate / this.outputSampleRate)
      // FIX: floor so a fractional ratio cannot read past the end.
      let length = Math.floor(data.length / compression)
      let result = new Float32Array(length)
      let index = 0
      let j = 0
      while (index < length) {
        result[index] = data[j]
        j += compression
        index++
      }
      return result
    },
    // Build a mono PCM WAV Blob: 44-byte RIFF header followed by samples.
    encodeWAV: function () {
      let sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate)
      let sampleBits = Math.min(this.inputSampleBits, this.oututSampleBits)
      let bytes = this.compress()
      let dataLength = bytes.length * (sampleBits / 8)
      let buffer = new ArrayBuffer(44 + dataLength)
      let data = new DataView(buffer)
      let channelCount = 1 // mono
      let offset = 0
      let writeString = function (str) {
        for (let i = 0; i < str.length; i++) {
          data.setUint8(offset + i, str.charCodeAt(i))
        }
      }
      writeString('RIFF') // RIFF chunk identifier
      offset += 4
      data.setUint32(offset, 36 + dataLength, true) // file size minus 8
      offset += 4
      writeString('WAVE') // RIFF type
      offset += 4
      writeString('fmt ') // format chunk identifier
      offset += 4
      data.setUint32(offset, 16, true) // format chunk size (16 for PCM)
      offset += 4
      data.setUint16(offset, 1, true) // audio format: 1 = PCM
      offset += 2
      data.setUint16(offset, channelCount, true) // channel count
      offset += 2
      data.setUint32(offset, sampleRate, true) // sample rate
      offset += 4
      data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true) // byte rate
      offset += 4
      data.setUint16(offset, channelCount * (sampleBits / 8), true) // block align
      offset += 2
      data.setUint16(offset, sampleBits, true) // bits per sample
      offset += 2
      writeString('data') // data chunk identifier
      offset += 4
      data.setUint32(offset, dataLength, true) // data chunk size
      offset += 4
      // Sample data: 8-bit WAV is unsigned (0..255), 16-bit is signed LE.
      if (sampleBits === 8) {
        for (let i = 0; i < bytes.length; i++, offset++) {
          let s = Math.max(-1, Math.min(1, bytes[i]))
          let val = s < 0 ? s * 0x8000 : s * 0x7FFF
          val = parseInt(255 / (65535 / (val + 32768)))
          // FIX: setUint8 — 8-bit PCM is unsigned; setInt8 also took a bogus
          // littleEndian argument that DataView ignores.
          data.setUint8(offset, val)
        }
      } else {
        for (let i = 0; i < bytes.length; i++, offset += 2) {
          let s = Math.max(-1, Math.min(1, bytes[i]))
          data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
        }
      }
      return new Blob([data], {
        type: 'audio/wav'
      })
    }
  }

  // Start capturing. The processor is connected to a destination because
  // onaudioprocess does not fire otherwise in some browsers; recording stays
  // silent because the callback below never writes to the output buffer.
  this.start = function () {
    console.log('开始录音')
    audioInput.connect(recorder)
    recorder.connect(context.destination)
  }

  // Stop capturing and detach the audio graph.
  this.stop = function () {
    console.log('停止')
    audioInput.disconnect()
    recorder.disconnect()
  }

  // Stop and return the recording encoded as a WAV Blob.
  this.getBlob = function () {
    console.log('获取音频')
    this.stop()
    return audioData.encodeWAV()
  }

  // POST the WAV Blob to `url` as multipart form data.
  // `callback(status, event)` receives 'uploading' | 'ok' | 'error' | 'cancel'.
  this.upload = function (url, callback) {
    let fd = new FormData()
    fd.append('audioData', this.getBlob())
    let xhr = new XMLHttpRequest()
    if (callback) {
      xhr.upload.addEventListener('progress', function (e) {
        callback('uploading', e)
      }, false)
      xhr.addEventListener('load', function (e) {
        callback('ok', e)
      }, false)
      xhr.addEventListener('error', function (e) {
        callback('error', e)
      }, false)
      xhr.addEventListener('abort', function (e) {
        callback('cancel', e)
      }, false)
    }
    xhr.open('POST', url)
    xhr.send(fd)
  }

  // Optional waveform display. FIX: guard against the canvas being absent so
  // construction no longer throws when #mycanvas is not in the DOM.
  let canvas = document.getElementById('mycanvas')
  let g = null
  let width = 0
  let height = 0
  if (canvas) {
    width = canvas.width
    height = canvas.height
    g = canvas.getContext('2d')
    g.strokeStyle = '#409EFF'
    g.translate(0.5, height / 2 + 0.5)
  }

  // Capture callback: store the chunk and draw it.
  // FIX (the reported problem): the old code copied the input samples into
  // e.outputBuffer while the processor was connected to context.destination,
  // so the microphone was played back through the speakers while recording.
  // The output buffer is zero-filled on each callback, so leaving it
  // untouched keeps playback silent. The old 48k->16k scratch conversion
  // (transcount/buffer48/buffer16) was removed — its result was never used.
  recorder.onaudioprocess = function (e) {
    let input = e.inputBuffer.getChannelData(0)
    audioData.input(input)
    if (g) {
      g.clearRect(-0.5, -height / 2 - 0.5, width, height)
      g.beginPath()
      for (let i = 0; i < width; i++) {
        g.lineTo(i, height / 2 * input[(input.length * i / width) | 0])
      }
      g.stroke()
    }
  }
}
// Report a fatal recorder error: show it to the user, then throw so the
// caller aborts.
// FIX: previously threw an ad-hoc anonymous object with a custom toString;
// throwing a real Error keeps the message and a usable stack trace.
HZRecorder.throwError = function (message) {
  alert(message)
  throw new Error(message)
}
// Whether recording is supported: true when a getUserMedia implementation
// was found by the polyfill at the top of the file.
HZRecorder.canRecording = (navigator.getUserMedia != null)
// Ask for microphone access and pass a ready HZRecorder to `callback`.
// `config` is forwarded to the HZRecorder constructor (sampleBits/sampleRate).
HZRecorder.get = function (callback, config) {
  if (!callback) {
    return
  }
  if (!navigator.getUserMedia) {
    // FIX: this used to call HZRecorder.throwErr — a typo for throwError —
    // which is undefined and raised a TypeError instead of showing the message.
    HZRecorder.throwError('当前浏览器不支持录音功能。')
    return
  }
  navigator.getUserMedia(
    { audio: true }, // audio only — no video track requested
    function (stream) {
      callback(new HZRecorder(stream, config))
    },
    function (error) {
      // Older implementations report error.code, newer ones error.name.
      switch (error.code || error.name) {
        case 'PERMISSION_DENIED':
        case 'PermissionDeniedError':
          HZRecorder.throwError('用户拒绝提供信息。')
          break
        case 'NOT_SUPPORTED_ERROR':
        case 'NotSupportedError':
          HZRecorder.throwError('浏览器不支持硬件设备。')
          break
        case 'MANDATORY_UNSATISFIED_ERROR':
        case 'MandatoryUnsatisfiedError':
          HZRecorder.throwError('无法发现指定的硬件设备。')
          break
        default:
          HZRecorder.throwError('无法打开麦克风。异常信息:' + (error.code || error.name))
          break
      }
    }
  )
}
export default HZRecorder
调用录音开始时,一录音就播放出来了
这段代码是一个录音程序,使用的是HTML5 Web Audio API。它允许您录制麦克风音频并将其保存为WAV文件。
代码中包含了一些常量和变量的定义,例如"config.sampleBits"和"config.sampleRate",用于设置录制音频的采样位数和采样率。此外还有"navigator.getUserMedia"方法,用于请求用户授权访问麦克风并获取音频流;"window.URL"或"window.webkitURL"则用于为录制得到的音频 Blob 创建对象 URL,以便播放或下载。
主要功能实现在“HZRecorder”对象中,它将音频流传递给“context.createMediaStreamSource(stream)”方法并将其转换为可处理的音频数据。它使用“context.createScriptProcessor”或“context.createJavaScriptNode”方法创建一个脚本处理器节点,并将其附加到音频输入源上。然后,它定义了一些变量,例如“transcount”、“pos”、“buffer48”和“buffer16”,这些变量用于存储和处理音频数据。最后,“HZRecorder”对象定义了一些方法,例如“input”、“compress”和“encodeWAV”,用于将录制的音频数据转换为WAV格式的音频文件。
如果您想停止播放音频,可以调用“audio.pause()”方法。如果您想重新开始播放,可以调用“audio.play()”方法。
参考GPT和自己的思路,这个问题可能是由于在录音时使用了浏览器的默认音频输出设备,而导致录音的音频信号被放回到了音频输入设备,从而导致了回声的产生。
要解决这个问题,可以尝试在录音时关闭浏览器的默认音频输出设备。这可以通过以下几种方式实现:
尝试使用 MediaStreamConstraints 对象中 audio 属性下的 echoCancellation 参数来开启回声消除(设为 true 才能抑制回声;设为 false 会关闭回声消除,反而加重回声):
// Enable echo cancellation so the microphone does not re-capture what the
// speakers play. FIX: the original example passed `echoCancellation: false`,
// which disables echo cancellation and makes the echo worse, contradicting
// the stated goal of suppressing it.
navigator.mediaDevices.getUserMedia({
  audio: {
    echoCancellation: true
  }
})
在录音开始前,将浏览器的默认音频输出设备设置为静音,这可以通过以下方式实现:
navigator.mediaDevices.getUserMedia({ audio: true }).then(function (stream) {
// NOTE(review): disabling the stream's audio tracks mutes the *microphone
// input* (the recording would capture silence); it does not silence the
// speaker output as the comment below claims — verify before using.
// Mute the default audio output
var audioTracks = stream.getAudioTracks();
for (var i = 0, l = audioTracks.length; i < l; i++) {
audioTracks[i].enabled = false;
}
// Start recording
// ...
});
尝试使用 AudioContext 中的 createMediaStreamDestination 方法来创建一个虚拟音频输出设备,并将录音的音频信号发送到该设备中,以避免回声的产生:
// Route the microphone into a MediaStreamDestination node instead of the
// real speakers, so nothing audible is produced while recording.
navigator.mediaDevices.getUserMedia({ audio: true }).then(function (stream) {
  var ctx = new AudioContext();
  // Virtual sink: audio sent here never reaches the hardware output.
  var sink = ctx.createMediaStreamDestination();
  var micSource = ctx.createMediaStreamSource(stream);
  micSource.connect(sink);
  // Start recording
  // ...
});
如果对您有帮助,请给与采纳,谢谢。
在代码中没有看到播放音频的相关代码,我猜测可能是录音的时候触发了移动端的自动播放功能。
以下答案基于ChatGPT与GISer Liu编写:
在录音机停止录音后,调用MediaRecorder的stop方法来停止录音,这样就不会再播放录音了。
例如,在上面的代码中,你可以添加以下代码来停止录音:
// Example: record with the higher-level MediaRecorder API instead.
let mediaRecorder = new MediaRecorder(stream);
mediaRecorder.start();
// Call the stop method wherever the recording should end
mediaRecorder.stop();
注意:stop方法停止录音后,你仍然可以使用MediaRecorder实例来访问录音数据。你可以使用MediaRecorder的ondataavailable事件处理程序来处理录音数据。例如:
mediaRecorder.ondataavailable = function(event) {
// Handle the recorded data
var audioBlob = event.data;
};
提供参考实例:https://www.likecs.com/show-305971538.html
你可以尝试调用 stream.getTracks() 方法获取当前 MediaStream 中的所有轨道(track),并逐个关闭它们,从而停止录音和播放。
以下是修改后的代码:
// Vendor-prefix normalization, repeated from the pasted answer's copy of
// the original file.
window.URL = window.URL || window.webkitURL
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia
// NOTE(review): this pasted "fixed" version from an answer is garbled —
// encodeWAV is truncated mid-header, playback code runs at construction
// time, and the braces below do not balance. See inline notes; do not use
// as-is.
let HZRecorder = function (stream, config) {
// const that = this;
config = config || {}
config.sampleBits = config.sampleBits || 16 // sample size: 8 or 16 bits
config.sampleRate = config.sampleRate || 16000 // sample rate (1/6 of 44100)
let context = new (window.webkitAudioContext || window.AudioContext)()
let audioInput = context.createMediaStreamSource(stream)
let createScript = context.createScriptProcessor || context.createJavaScriptNode
let recorder = createScript.apply(context, [4096, 1, 1])
let transcount = 0;
let pos = 0;
let buffer48 = new Float32Array(1024 * 3);
let buffer16 = new Float32Array(buffer48.length / 3);
let audioData = {
size: 0, // recorded length in samples
buffer: [], // recording cache
inputSampleRate: context.sampleRate, // input sample rate
inputSampleBits: 16, // input sample size: 8 or 16
outputSampleRate: config.sampleRate, // output sample rate
oututSampleBits: config.sampleBits, // output sample size: 8 or 16
input: function (data) {
this.buffer.push(new Float32Array(data))
this.size += data.length
},
compress: function () { // merge and down-sample
// merge
let data = new Float32Array(this.size)
let offset = 0
for (let i = 0; i < this.buffer.length; i++) {
data.set(this.buffer[i], offset)
offset += this.buffer[i].length
}
// down-sample by decimation
let compression = parseInt(this.inputSampleRate / this.outputSampleRate)
let length = data.length / compression
let result = new Float32Array(length)
let index = 0;
let j = 0
while (index < length) {
result[index] = data[j]
j += compression
index++
}
return result
},
encodeWAV: function () {
let sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate)
let sampleBits = Math.min(this.inputSampleBits, this.oututSampleBits)
let bytes = this.compress()
let dataLength = bytes.length * (sampleBits / 8)
let buffer = new ArrayBuffer(44 + dataLength)
let data = new DataView(buffer)
let channelCount = 1 // mono
let offset = 0
let writeString = function (str) {
for (let i = 0; i < str.length; i++) {
data.setUint8(offset + i, str.charCodeAt(i))
}
}
// RIFF chunk identifier
writeString('RIFF');
offset += 4
// NOTE(review): the function is truncated here — the rest of the 44-byte
// WAV header is missing, the loop around the sample conversion below was
// lost (`i` is undefined), and `data[offset] = val` sets a property on a
// DataView rather than writing a byte. This answer's code cannot work as
// pasted.
// convert the Float32 audio data to 16-bit integers
let s = Math.max(-1, Math.min(1, data[i]))
let val = s < 0 ? s * 0x8000 : s * 0x7FFF
val = parseInt(255 / (65535 / (val + 32768)))
data[offset] = val
offset++
}
return buffer
}
}
// start recording
this.start = function () {
audioInput.connect(recorder)
recorder.connect(context.destination)
}
// stop recording
this.stop = function () {
recorder.disconnect()
}
// get the audio file
this.getBlob = function () {
this.stop()
return new Blob([audioData.encodeWAV()])
}
// play back
this.play = function (audio) {
audio.src = window.URL.createObjectURL(this.getBlob())
}
// upload
this.upload = function (url, callback) {
let fd = new FormData()
fd.append('audioData', this.getBlob())
let xhr = new XMLHttpRequest()
if (callback) {
xhr.upload.addEventListener('progress', function (e) {
callback('uploading', e)
}, false)
xhr.addEventListener('load', function (e) {
callback('ok', e)
}, false)
xhr.addEventListener('error', function (e) {
callback('error', e)
}, false)
xhr.addEventListener('abort', function (e) {
callback('cancel', e)
}, false)
}
xhr.open('POST', url)
xhr.send(fd)
}
// audio capture
recorder.onaudioprocess = function (e) {
let buffer = e.inputBuffer.getChannelData(0)
audioData.input(buffer)
}
// NOTE(review): this block runs at construction time, builds a Blob and
// immediately calls audio.play() — this is exactly the unwanted playback
// the question asks about; also `this.encodeWAV` does not exist here
// (encodeWAV lives on audioData).
// handle playback
let blob = new Blob([this.encodeWAV()], { type: 'audio/wav' })
let audioURL = window.URL.createObjectURL(blob)
let audio = new Audio(audioURL)
audio.play()
}
// NOTE(review): stray closing brace — the braces in this pasted answer do
// not balance.
}
// NOTE(review): the remainder of this pasted answer sits at the top level of
// the module — `recorder`, `buffer48`, `pos` and `transcount` are locals of
// the constructor above and are not in scope here, so this assignment would
// throw a ReferenceError if executed.
// audio capture
recorder.onaudioprocess = function (e) {
let float32buffer = e.inputBuffer.getChannelData(0)
buffer48.set(float32buffer, pos)
pos += float32buffer.length
if (pos >= buffer48.length) {
let tmpBuffer = new Float32Array(buffer48.length)
tmpBuffer.set(buffer48)
HZRecorder.recBuffers[transcount] = tmpBuffer
pos = 0
transcount++
}
}
// NOTE(review): these `this.*` assignments also sit at module top level —
// outside any constructor `this` is undefined in an ES module, and
// `audioData`, `recorder`, `context` are not in scope. They appear to have
// been pasted out of their enclosing function.
this.start = function () {
audioData.buffer = []
audioData.size = 0
recorder.connect(context.destination)
}
this.stop = function () {
recorder.disconnect()
}
this.getBlob = function () {
return audioData.encodeWAV()
}
this.clear = function () {
audioData.buffer = []
audioData.size = 0
}
this.play = function () {
audioData.play()
}
this.download = function (name) {
let blob = new Blob([this.getBlob()], { type: 'audio/wav' })
let url = window.URL.createObjectURL(blob)
let a = document.createElement('a')
a.style.display = 'none'
a.href = url
a.download = name || 'output.wav'
document.body.appendChild(a)
a.click()
window.URL.revokeObjectURL(url)
}
this.getBase64 = function () {
// NOTE(review): this maps samples from [-1, 1] to unsigned 8-bit values and
// base64-encodes the raw bytes — the result is headerless PCM, not a WAV
// file, despite the data URL's MIME type.
let bytes = audioData.compress()
let binary = ''
for (let i = 0; i < bytes.length; i++) {
binary += String.fromCharCode(Math.round((bytes[i] + 1) / 2 * 255))
}
return 'data:audio/wav;base64,' + window.btoa(binary)
}
// Scratch store for captured Float32 chunks (filled by the capture callback
// in this answer's code).
HZRecorder.recBuffers = []
// Concatenate a list of Float32Array chunks into one array of `len` samples.
HZRecorder.mergeBuffers = function (buffers, len) {
  const merged = new Float32Array(len)
  let writeAt = 0
  for (const chunk of buffers) {
    merged.set(chunk, writeAt)
    writeAt += chunk.length
  }
  return merged
}
// Interleave two mono channels into one array (L, R, L, R, ...).
HZRecorder.interleave = function (inputL, inputR) {
  const total = inputL.length + inputR.length
  const out = new Float32Array(total)
  let src = 0
  for (let dst = 0; dst < total; src++) {
    out[dst++] = inputL[src]
    out[dst++] = inputR[src]
  }
  return out
}
// Write the characters of `string` into `view` as single bytes, starting at
// `offset` (one byte per UTF-16 code unit — intended for ASCII tags).
HZRecorder.writeUTFBytes = function (view, offset, string) {
  for (let i = 0; i < string.length; i += 1) {
    view.setUint8(offset + i, string.charCodeAt(i))
  }
}
// Demo wiring: request the microphone, then bind the #start / #stop buttons
// to the recorder. NOTE(review): runs at module top level and assumes the
// polyfilled navigator.getUserMedia and both DOM elements exist.
navigator.getUserMedia({ audio: true }, function (stream) {
let recorder = new HZRecorder(stream)
document.getElementById('start').onclick = function () {
recorder.start()
}
document.getElementById('stop').onclick = function () {
recorder.stop()
recorder.download()
}
}, function (e) {
console.error(e)
})
没解决