Android - AudioRecord类不读取数据,audioData和fftArray返回零值

2023-09-06 13:25:19 作者:哈里波特大

我是Android新手,一直在开发一个音高分析应用程序(最低SDK:8)。我阅读了很多关于如何使用AudioRecord类的文章,但不明白为什么录音时它不读取任何数据。我尝试显示audioData和fftArray的值,但返回的都是零,所以我认为问题出在read方法上。请帮忙检查一下。下面是我使用的代码:

FFT.java Complex.java

record.java

 最终意向意图=新的意图(pitch.analyzer.PitZer.ASSESSMENT);
MediaRecorder记录;
AudioRecord调谐器;
INT audioSource = MediaRecorder.AudioSource.MIC;
INT sampleRateInHz = AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_SYSTEM);
INT channelConfig = AudioFormat.CHANNEL_CONFIGURATION_MONO;
INT AudioFormat的= AudioFormat.ENCODING_PCM_16BIT;
INT bufferSizeInBytes = 4096;

INT样本;
短[] audioBuffer;
短[] audioData;
双[]温度;
TextView的FFT;
TextView的结果;
// TextView的缓冲区大小;
复[] fftTempArray;
复[] fftArray;
复[] fftInverse;

@覆盖
保护无效的onCreate(包savedInstanceState){


    // TODO自动生成方法存根
    super.onCreate(savedInstanceState);
    的setContentView(R.layout.record);

    按钮启动=(按钮)findViewById(R.id.record);
    按钮停止=(按钮)findViewById(R.id.stop);
    FFT =(TextView中)findViewById(R.id.fft);
    结果=(TextView中)findViewById(R.id.results);
    // BUFFERSIZE =(TextView中)findViewById(R.id.bufferSize);
    audioData =新的短[bufferSizeInBytes]
    调谐器=新AudioRecord(audioSource,sampleRateInHz,channelConfig,AudioFormat的,bufferSizeInBytes);
    //最后AudioRecorder记录=新AudioRecorder(/听力计/ TEMP);

    start.setOnClickListener(新OnClickListener(){

        公共无效的onClick(视图v){
            获得();
            computeFFT();
            显示();
        }
    });

    //â€|.wait一段时间

        stop.setOnClickListener(新OnClickListener(){

            公共无效的onClick(视图v){
                startActivity(意向);

            }
        });
}
公共无效获取(){
    尝试 {
        tuner.startRecording();
        样本= tuner.read(audioData,0,bufferSizeInBytes);
    }
    捕获(的Throwable T){

    }
}

公共无效computeFFT(){
    //从短期转换为双
    双[] micBufferData =新的双[bufferSizeInBytes]; //大小可能需要改变
    最终诠释bytesPerSample = 2; //由于是16bit PCM
    最后两级放大= 100.0; //选择一些你喜欢
    对于(INT指数= 0,floatIndex = 0;指数< bufferSizeInBytes  -  bytesPerSample + 1;指数+ = bytesPerSample,floatIndex ++){
        双样本= 0;
        为(中间体B = 0; b将bytesPerSample; b ++){
            INT V = audioData [指数+ B]。
            如果(b将bytesPerSample  -  1 || bytesPerSample == 1){
                V&安培; = 0xFF的;
            }
            样品+ = V<<状(b * 8);
        }
        双sample32 =放大*(样品/ 32768.0);
        micBufferData [floatIndex] = sample32;
    }

    //创建复杂的阵列在FFT使用
    fftTempArray =新的复杂[bufferSizeInBytes]
    的for(int i = 0; I< bufferSizeInBytes;我++)
    {
        fftTempArray [I] =新络合物(micBufferData [I],0);
    }

    //获取的FFT数据数组
    fftArray = FFT.fft(fftTempArray);
    fftInverse = FFT.ifft(fftTempArray);
    双[] FREQ2 =新的双[fftArray.length]
    //创建fftArray的大小的阵列
    双[]幅度=新的双[fftArray.length]
    的for(int i = 0; I< fftArray.length;我++){
        幅度[i] = fftArray [I]。ABS();
        FREQ2 [I] = ComputeFrequency(大小[I]);
    }

    fft.setTextColor(Color.BLUE);
    //fft.setText("fftArray为+ fftArray [500] +和fftTempArray为+ fftTempArray [500] +和fftInverse为+ fftInverse [500] +和audioData为+ audioData [500] +和幅度为+量值[1] +,+量值[500] +,+量值[1000] +和freq2时为+ FREQ2 [1] +你的岩石哥们)!;
    / *的for(int i = 2; I<样本;我++){
        fft.append(+量值[I] +赫兹);
    }
    的for(int i = 2; I<样本;我++){
        fft.append(+ FREQ2 [I] +赫兹);
    }
    * /
}

私人双人ComputeFrequency(双arrayIndex){
    返回((1.0 * sampleRateInHz)/(1.0 * 100))* arrayIndex;
    }

公共无效显示器(){
    results.setTextColor(Color.BLUE);
    results.setText(结果:+ audioData [1] +);
    的for(int i = 2; I<样本;我++){
        results.append(+ audioData [I]);
    }
    results.invalidate();
    //fft.setTextColor(Color.GREEN);
    fft.setText(sampleRateInHz:+ sampleRateInHz);
    fft.append(\ nfftArray:+ fftArray [0] +赫兹);
    的for(int i = 1; I<样本;我++){
        fft.append(+ fftArray [I] +赫兹);
        }
    fft.append(\ naudioData:+ audioData [1]);
    fft.append(\ nsamples:+样本);
    //fft.invalidate();
}
公共无效停止()抛出IOException异常{
    tuner.stop();
    //audioInput.reset();
    tuner.release();

    //recorder.stop();
    //recorder.reset();
    //recorder.release();
  }
 

解决方案

在从设备读取数据之前,你应该先开始录音(完成后再停止)。下面是我用于简单读取的代码:

 短[] audioData =新的短[BUFFERSIZE]

 INT偏移= 0;
 INT shortRead = 0;

 //启动窃听到的麦克风
 audioRecored.startRecording();

 //开始从麦克风到内部缓冲区中读取 - 夹头由大块
 而(偏移< BUFFERSIZE)
 {
    shortRead = audioRecored.read(audioData,偏移,BUFFERSIZE  - 偏移量);
        胶印+ = shortRead;
 }

 //阻止窃听到的麦克风
 audioRecored.stop();
 
Audio Record Service Plus安卓版下载 手机Audio Record Service Plus官网最新版

I am new to android and I have been working on a Pitch Analyzer application (minimum SDK: 8). I read many articles on how to implement Audiorecord class but I wonder why it does not read any data when I record. I tried to display the values of the audioData and fftArray but zero is returned, so I assumed the problem is with the read method. Please try to check these. Here are the codes I used:

FFT.java Complex.java

record.java

// Intent used to launch the assessment activity when "stop" is pressed.
final Intent intent = new Intent("pitch.analyzer.PitZer.ASSESSMENT");
MediaRecorder recorder;   // declared but never used in this excerpt
AudioRecord tuner;        // microphone capture source
int audioSource = MediaRecorder.AudioSource.MIC;
// NOTE(review): the native output rate of STREAM_SYSTEM is a playback
// property; a fixed, guaranteed capture rate (e.g. 44100) is the usual
// choice for AudioRecord — confirm this rate is valid for recording.
int sampleRateInHz = AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_SYSTEM);
int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_MONO;
int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
// NOTE(review): despite the name, this value is used as a count of shorts
// in tuner.read(short[], ...) below, not bytes; verify the size against
// AudioRecord.getMinBufferSize().
int bufferSizeInBytes = 4096;

int samples;              // number of shorts actually returned by tuner.read()
short[] audioBuffer;      // unused in this excerpt
short[] audioData;        // raw 16-bit PCM samples captured from the mic
double[] temp;            // unused in this excerpt
TextView fft;             // shows the FFT output
TextView results;         // shows the raw sample values
//TextView bufferSize;
Complex[] fftTempArray;   // complex-valued input handed to FFT.fft()
Complex[] fftArray;       // forward FFT result
Complex[] fftInverse;     // inverse FFT of the input (diagnostic only)

@Override
protected void onCreate(Bundle savedInstanceState) {


    // Standard activity setup: inflate the layout and cache the views.
    super.onCreate(savedInstanceState);
    setContentView(R.layout.record);

    Button start=(Button)findViewById(R.id.record);
    Button stop=(Button)findViewById(R.id.stop);
    fft = (TextView)findViewById(R.id.fft);
    results = (TextView)findViewById(R.id.results);
    //bufferSize = (TextView)findViewById(R.id.bufferSize);     
    // Buffer of 16-bit samples; sized in shorts despite the field's name.
    audioData = new short[bufferSizeInBytes];
    tuner = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes);
    //final AudioRecorder recorder = new AudioRecorder("/audiometer/temp");

    // "Record" button: capture one buffer, transform it, render both views.
    start.setOnClickListener(new OnClickListener() {

        public void onClick(View v) {
            acquire();
            computeFFT();
            display();
        }
    });       

    //….wait a while

        // "Stop" button: hands off to the assessment activity.
        // NOTE(review): the recorder is never stopped/released here —
        // the stop() method below is not wired to anything.
        stop.setOnClickListener(new OnClickListener() {

            public void onClick(View v) {
                startActivity(intent);

            }
        });
}
/**
 * Starts the recorder and performs one blocking read of up to
 * bufferSizeInBytes shorts into audioData. The count of shorts actually
 * read is stored in {@code samples} (0 on any failure).
 */
public void acquire(){
    try {
        tuner.startRecording();
        samples = tuner.read(audioData, 0, bufferSizeInBytes);
        // read() reports failure via negative codes (ERROR_INVALID_OPERATION,
        // ERROR_BAD_VALUE); normalize to 0 so downstream loops that use
        // `samples` as a bound see "no data" rather than a negative limit.
        if (samples < 0) {
            samples = 0;
        }
    }
    catch (Throwable t){
        // Previously swallowed silently, which made the all-zero output
        // impossible to diagnose. Surface the failure (prefer
        // android.util.Log in production code).
        t.printStackTrace();
        samples = 0;
    }
}

/**
 * Converts the captured 16-bit samples to normalized doubles, runs a
 * forward (and inverse) FFT, and derives per-bin magnitudes/frequencies.
 */
public void computeFFT(){
    // Conversion from short to double.
    // BUG FIX: audioData is a short[], so every element is already a
    // complete 16-bit sample. The previous code re-combined PAIRS of array
    // elements as if they were bytes of one sample, which both halved the
    // effective sample count and produced garbage values.
    double[] micBufferData = new double[bufferSizeInBytes];
    final double amplification = 100.0; // arbitrary display gain
    for (int i = 0; i < bufferSizeInBytes && i < audioData.length; i++) {
        // Normalize the signed 16-bit sample to [-1, 1) before amplifying.
        micBufferData[i] = amplification * (audioData[i] / 32768.0);
    }

    // Wrap the real-valued samples as Complex numbers for the FFT.
    fftTempArray = new Complex[bufferSizeInBytes];
    for (int i = 0; i < bufferSizeInBytes; i++) {
        fftTempArray[i] = new Complex(micBufferData[i], 0);
    }

    // Forward FFT of the input; the inverse is kept for sanity checking.
    fftArray = FFT.fft(fftTempArray);
    fftInverse = FFT.ifft(fftTempArray);

    double[] freq2 = new double[fftArray.length];
    double[] magnitude = new double[fftArray.length];
    for (int i = 0; i < fftArray.length; i++) {
        magnitude[i] = fftArray[i].abs();
        // BUG FIX: a bin's frequency is a function of its INDEX, not of its
        // magnitude — the original passed magnitude[i] here.
        freq2[i] = ComputeFrequency(i);
    }

    fft.setTextColor(Color.BLUE);
}

/**
 * Maps an FFT bin index to its frequency in Hz using f = index * Fs / N,
 * where N is the FFT size (the number of input samples).
 */
private double ComputeFrequency(double arrayIndex) {
    // BUG FIX: the width of one FFT bin is sampleRate / fftSize; the
    // original divided by a magic constant 100, which has no relation to
    // the transform length and yields wrong frequencies.
    return (arrayIndex * sampleRateInHz) / bufferSizeInBytes;
    }

/**
 * Renders the raw samples and the FFT output into the two TextViews.
 * Output text is identical to the original; the strings are now assembled
 * off-screen, because calling TextView.append() thousands of times forces
 * a re-layout per call and behaves quadratically.
 */
public void display(){
    results.setTextColor(Color.BLUE);
    StringBuilder raw = new StringBuilder("results: ").append(audioData[1]);
    for (int i = 2; i < samples; i++) {
        raw.append(' ').append(audioData[i]);
    }
    results.setText(raw.toString());
    results.invalidate();

    StringBuilder spectrum = new StringBuilder("sampleRateInHz: ").append(sampleRateInHz);
    spectrum.append("\nfftArray: ").append(fftArray[0]).append(" Hz");
    for (int i = 1; i < samples; i++) {
        spectrum.append(' ').append(fftArray[i]).append(" Hz");
    }
    spectrum.append("\naudioData: ").append(audioData[1]);
    spectrum.append("\nsamples: ").append(samples);
    fft.setText(spectrum.toString());
    //fft.invalidate();
}
/**
 * Stops capture and releases the native AudioRecord resources.
 * NOTE(review): nothing in this excerpt ever calls this method, so the
 * AudioRecord leaks when the activity goes away — wire it to the stop
 * button or to the activity lifecycle (onPause/onDestroy).
 */
public void stop() throws IOException {
    tuner.stop();
    //audioInput.reset();
    tuner.release();

    //recorder.stop();
    //recorder.reset();
    //recorder.release();
  }

解决方案

Before reading from the device you should start a recording (and stop it once you finish). Here is the code I use for a simple read:

 short[] audioData = new short[bufferSize];

 int offset = 0;
 int shortRead = 0;

 // Start tapping into the microphone.
 audioRecored.startRecording();

 // Fill the buffer chunk by chunk: read() may return fewer shorts than
 // requested, so keep reading until the buffer is full.
 while (offset < bufferSize)
 {
    shortRead = audioRecored.read(audioData, offset, bufferSize - offset);
    // BUG FIX: read() returns a negative error code on failure; without
    // this check the loop spins forever because offset never advances.
    if (shortRead < 0) {
        break;
    }
    offset += shortRead;
 }

 // Stop tapping into the microphone.
 audioRecored.stop();