Temporarily stop the RecognitionListener

Asked: 2018-02-05 16:47:43

Tags: android android-studio speech-recognition

I have a set of Activities whose architecture is similar to the code below. Each of them is constantly listening for different voice commands that trigger different actions.

The problem I have is that when a command switches to another Activity, the listener keeps giving a RECOGNIZER_BUSY error on the new Activity until I go back to the original Activity, so I guess I should somehow stop the listener before changing Activities. How can I do that?

Code (mine has more functionality; this is just the basic part):

import android.Manifest;
import android.content.Context;
import android.content.Intent;
import android.media.AudioManager;
import android.media.MediaPlayer;
import android.os.Build;
import android.os.Bundle;
import android.os.Environment;
import android.os.Handler;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.support.v4.app.ActivityCompat;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.widget.Toast;

import java.io.File;
import java.util.ArrayList;

import static com.example.u523235.continuousspeech.JaroWinkler.compute;


public class TestActivity extends AppCompatActivity implements RecognitionListener {

private static final String TAG = "LOGING";
private SpeechRecognizer recog;
private AudioManager manager;
private Handler handler;
private Runnable readyRecognizeSpeech;

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_test);

    ActivityCompat.requestPermissions(
            this,
            new String[]{
                    Manifest.permission.WRITE_EXTERNAL_STORAGE,
                    Manifest.permission.CAMERA,
                    Manifest.permission.INTERNET,
                    Manifest.permission.RECORD_AUDIO,},
            0);

    this.handler = new Handler();
    this.readyRecognizeSpeech = new Runnable() {
        @Override
        public void run() {
            startRecognizeSpeech();
        }
    };

    this.manager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);

    audio(false);
    this.handler.postDelayed(this.readyRecognizeSpeech, 100);
}

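// Tears down any previous recognizer and starts a fresh listening session
// (pt_PT language, dictation mode, partial results enabled).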
private void startRecognizeSpeech() {
    if (this.recog != null) {
        this.recog.destroy();
    }

    this.recog = SpeechRecognizer.createSpeechRecognizer(TestActivity.this);

    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    String languagePref = "pt_PT";
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, languagePref);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE, languagePref);
    intent.putExtra(RecognizerIntent.EXTRA_ONLY_RETURN_LANGUAGE_PREFERENCE, languagePref);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    intent.putExtra("android.speech.extra.DICTATION_MODE", true);
    intent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true);

    this.recog.setRecognitionListener(TestActivity.this);
    this.recog.startListening(intent);
}

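// Adjusts the mute state of the main audio streams depending on the flag:
// adjustStreamVolume on API 23+, the deprecated setStreamMute on older versions.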
public void audio(boolean flag) {
    if (flag) {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
            manager.adjustStreamVolume(AudioManager.STREAM_NOTIFICATION, AudioManager.ADJUST_UNMUTE, 0);
            manager.adjustStreamVolume(AudioManager.STREAM_ALARM, AudioManager.ADJUST_MUTE, 0);
            manager.adjustStreamVolume(AudioManager.STREAM_MUSIC, AudioManager.ADJUST_MUTE, 0);
            manager.adjustStreamVolume(AudioManager.STREAM_RING, AudioManager.ADJUST_MUTE, 0);
            manager.adjustStreamVolume(AudioManager.STREAM_SYSTEM, AudioManager.ADJUST_MUTE, 0);
        } else {
            manager.setStreamMute(AudioManager.STREAM_NOTIFICATION, false);
            manager.setStreamMute(AudioManager.STREAM_ALARM, false);
            manager.setStreamMute(AudioManager.STREAM_MUSIC, false);
            manager.setStreamMute(AudioManager.STREAM_RING, false);
            manager.setStreamMute(AudioManager.STREAM_SYSTEM, false);
        }
    } else {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
            manager.adjustStreamVolume(AudioManager.STREAM_NOTIFICATION, AudioManager.ADJUST_MUTE, 0);
            manager.adjustStreamVolume(AudioManager.STREAM_ALARM, AudioManager.ADJUST_MUTE, 0);
            manager.adjustStreamVolume(AudioManager.STREAM_MUSIC, AudioManager.ADJUST_MUTE, 0);
            manager.adjustStreamVolume(AudioManager.STREAM_RING, AudioManager.ADJUST_MUTE, 0);
            manager.adjustStreamVolume(AudioManager.STREAM_SYSTEM, AudioManager.ADJUST_MUTE, 0);
        } else {
            manager.setStreamMute(AudioManager.STREAM_NOTIFICATION, true);
            manager.setStreamMute(AudioManager.STREAM_ALARM, true);
            manager.setStreamMute(AudioManager.STREAM_MUSIC, true);
            manager.setStreamMute(AudioManager.STREAM_RING, true);
            manager.setStreamMute(AudioManager.STREAM_SYSTEM, true);
        }
    }
}

@Override
public void onReadyForSpeech(Bundle params) {
    Log.d(TAG, "ready for speech");
}

@Override
public void onBeginningOfSpeech() {
    Log.d(TAG, "beginning of speech");
}

@Override
public void onBufferReceived(byte[] buffer) {
    Log.d(TAG, "onBufferReceived");
    this.handler.post(this.readyRecognizeSpeech);
}

@Override
public void onRmsChanged(float rmsdB) {
    //Log.d(TAG, "Received : " + rmsdB + "dB");
}

@Override
public void onEndOfSpeech() {
    Log.d(TAG, "end of speech");
    this.handler.post(this.readyRecognizeSpeech);
}

@Override
public void onError(int error) {
    Log.d(TAG, "on error");
    switch (error) {
        case SpeechRecognizer.ERROR_AUDIO:
            Log.d(TAG, "ERROR_AUDIO");
            break;
        case SpeechRecognizer.ERROR_CLIENT:
            Log.d(TAG, "ERROR_CLIENT");
            break;
        case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
            Log.d(TAG, "ERROR_INSUFFICIENT_PERMISSIONS");
            break;
        case SpeechRecognizer.ERROR_NETWORK:
            Log.d(TAG, "ERROR_NETWORK");
            break;
        case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
            Log.d(TAG, "ERROR_NETWORK_TIMEOUT");
            break;
        case SpeechRecognizer.ERROR_NO_MATCH:
            Log.d(TAG, "ERROR_NO_MATCH");
            break;
        case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
            Log.d(TAG, "ERROR_RECOGNIZER_BUSY");
            break;
        case SpeechRecognizer.ERROR_SERVER:
            Log.d(TAG, "ERROR_SERVER");
            break;
        case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
            Log.d(TAG, "ERROR_SPEECH_TIMEOUT");
            break;
        default:
    }
    this.handler.post(this.readyRecognizeSpeech);
}

@Override
public void onEvent(int eventType, Bundle params) {
    Log.d(TAG, "on event");
}

@Override
public void onPartialResults(Bundle partialResults) {
    Log.d(TAG, "on partial results");
    ArrayList<String> data = partialResults.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    ArrayList<String> unstableData = partialResults.getStringArrayList("android.speech.extra.UNSTABLE_TEXT");
    if (data != null) {
        data.addAll(unstableData);
        receiveResults(data);
    } else {
        receiveResults(unstableData);
    }
    this.handler.post(this.readyRecognizeSpeech);
}

@Override
public void onResults(Bundle data) {
    Log.d(TAG, "on final results");
    ArrayList<String> rec = data.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    ArrayList<String> unstableData = data.getStringArrayList("android.speech.extra.UNSTABLE_TEXT");
    assert rec != null;
    if (rec != null) {
        rec.addAll(unstableData);
        receiveResults(rec);
    } else {
        receiveResults(unstableData);
    }
    this.handler.post(this.readyRecognizeSpeech);
}

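// Compares each recognized string against the command word using Jaro-Winkler
// similarity and plays the audio file when the score is above 0.85.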
public void receiveResults(ArrayList<String> data) {
    for (String s : data) {
        if (s.length() > 0) {
            Log.d(TAG, "-> " + s + " | SIMILAR: " + compute(s.toUpperCase(), "FEITO") + "\n");

            String inst1 = "TOCAR";
            if (compute(inst1, s.toUpperCase()) > 0.85) {
                playAudio();
                break;
            }
        }
    }
}

public void playAudio() {
    //set up MediaPlayer
    MediaPlayer mp = new MediaPlayer();
    String path = Environment.getExternalStorageDirectory() + File.separator + "sound.mp3";
    Log.d(TAG, "Playing: " + path);

    try {
        mp.setDataSource(path);
        mp.prepare();
        mp.setVolume(100, 100);
        mp.start();
        mp.setVolume(0, 0);
    } catch (Exception e) {
        e.printStackTrace();
    }
}

@Override
public void onBackPressed() {
    Toast.makeText(this, "Operación Inválida", Toast.LENGTH_SHORT).show();
}

@Override
public void onResume() {
    super.onResume();
    this.handler.post(this.readyRecognizeSpeech);
}

@Override
protected void onPause() {
    super.onPause();
    Log.i(TAG, "on pause called");
    if (this.recog != null) {
        this.recog.destroy();
    }
    this.recog = null;
}
}

1 Answer:

Answer 0 (score: 0):

Even if you manage to get some sort of implementation working, this is a very bad idea: SpeechRecognizer is not meant to be "looped" this way for continuous recognition.

With that warning out of the way: you need a single method that handles the recognition loop. Right now you are calling this.handler.post(this.readyRecognizeSpeech) from all over your code; centralize it in something like:

private void loopAgainIfIShould() {
    if (parametersAreOk) {
        this.handler.post(this.readyRecognizeSpeech);
    } else {
        // don't
    }
}

The condition you want to check is whether your Activity is still in the foreground; you can set a variable in onPause, or when a phone call comes in, and so on.

Only when all of those conditions are met should you restart the loop.
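A minimal sketch of that idea, using the fields from the question's TestActivity (the isInForeground flag and loopAgainIfIShould are illustrative names, not part of the Android API):

private boolean isInForeground;

@Override
public void onResume() {
    super.onResume();
    isInForeground = true;          // we own the microphone again
    loopAgainIfIShould();
}

@Override
protected void onPause() {
    super.onPause();
    isInForeground = false;         // stop re-posting the loop once we leave the foreground
    if (recog != null) {
        recog.destroy();            // release the recognizer so the next Activity can create its own
        recog = null;
    }
}

private void loopAgainIfIShould() {
    if (isInForeground) {
        handler.post(readyRecognizeSpeech);
    }
    // otherwise do nothing: the next Activity owns the recognizer now
}

Every callback that currently posts readyRecognizeSpeech directly (onBufferReceived, onEndOfSpeech, onError, onPartialResults, onResults) would then call loopAgainIfIShould() instead, so the loop can only restart while the Activity is in the foreground.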