I can record from the microphone successfully, but saving the recording to a local file format keeps failing! Does anyone have a working example?
Fill in these fields according to your recorded data, put them at the front of your file, and it is a WAV file. That is all packaging an audio file amounts to.
A WAV file is just plain raw PCM data preceded by a WAV header:
unsigned char wav_header[] = {
    'R', 'I', 'F', 'F',                     // "RIFF" chunk ID
    0xfc, 0xff, 0xff, 0xff,                 // RIFF chunk size = file length - 8
    'W', 'A', 'V', 'E',                     // "WAVE" format tag
    'f', 'm', 't', ' ',                     // "fmt " sub-chunk ID
    16, 0, 0, 0,                            // fmt sub-chunk size (16 for PCM)
    WAVE_FORMAT_PCM, WAVE_FORMAT_PCM >> 8,  // audio format (WAVE_FORMAT_PCM = 1)
    0, 0,                                   // number of channels
    0, 0, 0, 0,                             // sample rate (samples per second)
    0, 0, 0, 0,                             // byte rate (sample rate * block align)
    0, 0,                                   // block align (bytes per sample frame, all channels)
    16, 0,                                  // bits per sample
    'd', 'a', 't', 'a',                     // "data" sub-chunk ID
    0xd8, 0xff, 0xff, 0xff                  // data length, 36 bytes less than the RIFF chunk size above
};
Prepend this header and you are done.
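For completeness, here is a minimal C# sketch (not part of the original answer) that fills in the derived header fields for a given sample rate and channel count and writes the header plus raw 16-bit PCM bytes to disk. WavWriter, WriteWav and their parameters are illustrative names of my own, not an existing API.

using System.IO;
using System.Text;

// A minimal sketch, assuming pcmData already holds interleaved 16-bit little-endian PCM samples.
// The class/method names and parameters are hypothetical, chosen only for this illustration.
public static class WavWriter
{
    public static void WriteWav(string path, byte[] pcmData, int sampleRate, short channels)
    {
        const short bitsPerSample = 16;
        short blockAlign = (short)(channels * bitsPerSample / 8); // bytes per sample frame
        int byteRate = sampleRate * blockAlign;                   // bytes per second

        using (var bw = new BinaryWriter(File.Create(path)))
        {
            bw.Write(Encoding.ASCII.GetBytes("RIFF"));
            bw.Write(36 + pcmData.Length);            // RIFF chunk size = file length - 8
            bw.Write(Encoding.ASCII.GetBytes("WAVE"));
            bw.Write(Encoding.ASCII.GetBytes("fmt "));
            bw.Write(16);                             // fmt sub-chunk size for PCM
            bw.Write((short)1);                       // audio format: 1 = PCM
            bw.Write(channels);
            bw.Write(sampleRate);
            bw.Write(byteRate);
            bw.Write(blockAlign);
            bw.Write(bitsPerSample);
            bw.Write(Encoding.ASCII.GetBytes("data"));
            bw.Write(pcmData.Length);                 // data sub-chunk size = RIFF chunk size - 36
            bw.Write(pcmData);
        }
    }
}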
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using System.IO;
public class MicroPhoneInput : MonoBehaviour
{
/*
Error reported after starting to record:
Starting Microphone failed. result=25 (Unsupported file or audio format. )
UnityEngine.Microphone:Start(String, Boolean, Int32, Int32)
MicroPhoneInput:StartRecord() (at Assets/Script/MicroPhoneInput.cs:64)
MicroPhoneInput:OnGUI() (at Assets/Script/MicroPhoneInput.cs:41)
Workarounds suggested on the Unity answers site
(http://answers.unity3d.com/search.html?f=&type=question&redirect=search%2Fsearch&sort=relevance&q=MicroPhone):
- record with the audio APIs that ship with .NET instead
- use a plugin
*/
public AudioSource audio;
void Start()
{
audio = GetComponent<AudioSource>();
string[] ms = Microphone.devices;
Debug.Log("录音设备数量:" + ms.Length);
deviceCount = ms.Length;
if (deviceCount == 0)
{
Debug.Log("no microphone found");
}
}
int deviceCount;
void OnGUI()
{
if (deviceCount > 0)
{
if (!Microphone.IsRecording(null) && GUILayout.Button("Start", GUILayout.Height(Screen.height / 20), GUILayout.Width(Screen.width / 5)))
{
StartRecord();
}
if (Microphone.IsRecording(null) && GUILayout.Button("Stop", GUILayout.Height(Screen.height / 20), GUILayout.Width(Screen.width / 5)))
{
StopRecord();
}
if (!Microphone.IsRecording(null) && GUILayout.Button("Play", GUILayout.Height(Screen.height / 20), GUILayout.Width(Screen.width / 5)))
{
PlayRecord();
}
if (!Microphone.IsRecording(null) && GUILayout.Button("Print", GUILayout.Height(Screen.height / 20), GUILayout.Width(Screen.width / 5)))
{
PrintRecord();
}
}
}
void StartRecord()
{
Debug.Log("录音设备名字:" + Microphone.devices[0]);
int minFreq, maxFreq;
Microphone.GetDeviceCaps(Microphone.devices[0], out minFreq, out maxFreq);
Debug.Log("设备最小频率 = " + minFreq.ToString() + " 最大频率 = " + maxFreq.ToString());
// Use a standard sample rate such as 44100 Hz; non-standard rates (e.g. 14100) can trigger the
// "result=25 (Unsupported file or audio format)" error quoted above. If in doubt, clamp the rate
// to the minFreq/maxFreq range reported by GetDeviceCaps.
audio.clip = Microphone.Start(null, true, 10, 44100);
// Busy-wait until the microphone actually starts delivering samples.
while (!(Microphone.GetPosition(null) > 0)) { }
Debug.Log("devices' Rank = " + Microphone.devices.Rank.ToString());
Debug.Log("StartRecord");
}
void StopRecord()
{
if (!Microphone.IsRecording(null))
{
return;
}
Microphone.End(null);
audio.Stop();
Debug.Log("StopRecord");
}
void PrintRecord()
{
if (Microphone.IsRecording(null))
{
return;
}
SavWav.Save("GGD", audio.clip);
}
void PlayRecord()
{
if (Microphone.IsRecording(null))
{
return;
}
if (audio.clip == null)
{
return;
}
audio.Play();
Debug.Log("PlayRecord");
}
public byte[] GetClipData()
{
if (audio.clip == null)
{
Debug.Log("GetClipData audio.clip is null");
return null;
}
// Pull the raw float samples out of the clip (interleaved if there is more than one channel).
float[] samples = new float[audio.clip.samples * audio.clip.channels];
audio.clip.GetData(samples, 0);
Debug.Log("samples.Length = " + samples.Length);
// Copy the 32-bit float samples straight into a byte array, 4 bytes per sample, little-endian.
byte[] outData = new byte[samples.Length * 4];
System.Buffer.BlockCopy(samples, 0, outData, 0, outData.Length);
if (outData.Length <= 0)
{
Debug.Log("GetClipData outData is empty");
return null;
}
return outData;
}
}
using System;
using System.IO;
using UnityEngine;
using System.Collections.Generic;
public static class SavWav {
const int HEADER_SIZE = 44;
public static bool Save(string filename, AudioClip clip) {
if (!filename.ToLower().EndsWith(".wav")) {
filename += ".wav";
}
var filepath = Path.Combine(Application.dataPath, filename);
Debug.Log(filepath);
// Make sure directory exists if user is saving to sub dir.
Directory.CreateDirectory(Path.GetDirectoryName(filepath));
using (var fileStream = CreateEmpty(filepath)) {
ConvertAndWrite(fileStream, clip);
WriteHeader(fileStream, clip);
}
return true; // TODO: return false if there's a failure saving the file
}
public static AudioClip TrimSilence(AudioClip clip, float min) {
// clip.samples is the per-channel count; allocate room for every channel's interleaved data.
var samples = new float[clip.samples * clip.channels];
clip.GetData(samples, 0);
return TrimSilence(new List<float>(samples), min, clip.channels, clip.frequency);
}
public static AudioClip TrimSilence(List<float> samples, float min, int channels, int hz) {
return TrimSilence(samples, min, channels, hz, false, false);
}
public static AudioClip TrimSilence(List<float> samples, float min, int channels, int hz, bool _3D, bool stream) {
int i;
for (i=0; i<samples.Count; i++) {
if (Mathf.Abs(samples[i]) > min) {
break;
}
}
samples.RemoveRange(0, i);
for (i=samples.Count - 1; i>0; i--) {
if (Mathf.Abs(samples[i]) > min) {
break;
}
}
samples.RemoveRange(i + 1, samples.Count - i - 1); // keep the last sample that is above the threshold
var clip = AudioClip.Create("TempClip", samples.Count / channels, channels, hz, _3D, stream);
clip.SetData(samples.ToArray(), 0);
return clip;
}
static FileStream CreateEmpty(string filepath) {
var fileStream = new FileStream(filepath, FileMode.Create);
byte emptyByte = new byte();
for(int i = 0; i < HEADER_SIZE; i++) //preparing the header
{
fileStream.WriteByte(emptyByte);
}
return fileStream;
}
static void ConvertAndWrite(FileStream fileStream, AudioClip clip) {
// clip.samples is the per-channel count; multiply by channels so the data chunk
// actually contains the interleaved samples the header below claims.
var samples = new float[clip.samples * clip.channels];
clip.GetData(samples, 0);
Int16[] intData = new Int16[samples.Length];
//converting in 2 float[] steps to Int16[], //then Int16[] to Byte[]
Byte[] bytesData = new Byte[samples.Length * 2];
//bytesData array is twice the size of
//dataSource array because a float converted in Int16 is 2 bytes.
int rescaleFactor = 32767; //to convert float to Int16
for (int i = 0; i<samples.Length; i++) {
intData[i] = (short) (samples[i] * rescaleFactor);
Byte[] byteArr = new Byte[2];
byteArr = BitConverter.GetBytes(intData[i]);
byteArr.CopyTo(bytesData, i * 2);
}
fileStream.Write(bytesData, 0, bytesData.Length);
}
static void WriteHeader(FileStream fileStream, AudioClip clip) {
var hz = clip.frequency;
var channels = clip.channels;
var samples = clip.samples;
fileStream.Seek(0, SeekOrigin.Begin);
Byte[] riff = System.Text.Encoding.UTF8.GetBytes("RIFF");
fileStream.Write(riff, 0, 4);
Byte[] chunkSize = BitConverter.GetBytes(fileStream.Length - 8);
fileStream.Write(chunkSize, 0, 4);
Byte[] wave = System.Text.Encoding.UTF8.GetBytes("WAVE");
fileStream.Write(wave, 0, 4);
Byte[] fmt = System.Text.Encoding.UTF8.GetBytes("fmt ");
fileStream.Write(fmt, 0, 4);
Byte[] subChunk1 = BitConverter.GetBytes(16);
fileStream.Write(subChunk1, 0, 4);
UInt16 one = 1;
Byte[] audioFormat = BitConverter.GetBytes(one);
fileStream.Write(audioFormat, 0, 2);
Byte[] numChannels = BitConverter.GetBytes(channels);
fileStream.Write(numChannels, 0, 2);
Byte[] sampleRate = BitConverter.GetBytes(hz);
fileStream.Write(sampleRate, 0, 4);
Byte[] byteRate = BitConverter.GetBytes(hz * channels * 2); // sampleRate * bytesPerSample*number of channels, here 44100*2*2
fileStream.Write(byteRate, 0, 4);
UInt16 blockAlign = (ushort) (channels * 2);
fileStream.Write(BitConverter.GetBytes(blockAlign), 0, 2);
UInt16 bps = 16;
Byte[] bitsPerSample = BitConverter.GetBytes(bps);
fileStream.Write(bitsPerSample, 0, 2);
Byte[] datastring = System.Text.Encoding.UTF8.GetBytes("data");
fileStream.Write(datastring, 0, 4);
Byte[] subChunk2 = BitConverter.GetBytes(samples * channels * 2);
fileStream.Write(subChunk2, 0, 4);
// fileStream.Close();
}
}
http://www.cnblogs.com/fortomorrow/archive/2012/10/31/unity06.html
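One thing the example never does is call the TrimSilence helpers: Microphone.Start above always allocates a 10-second looping clip, so a recording stopped earlier gets saved with trailing silence. A minimal sketch of how the save step could trim first, assuming the method lives inside the MicroPhoneInput class; the 0.01f threshold is a guess and should be tuned to your microphone's noise floor:

// Hypothetical variant of PrintRecord, added to the MicroPhoneInput class above.
void PrintRecordTrimmed()
{
    if (Microphone.IsRecording(null) || audio.clip == null)
    {
        return;
    }
    // Drop leading/trailing samples whose amplitude stays below the threshold, then save.
    AudioClip trimmed = SavWav.TrimSilence(audio.clip, 0.01f); // 0.01f is an assumed threshold
    SavWav.Save("GGD_trimmed", trimmed);
}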