Android Speech Recognition App Without Pop-Up


Problem Description


I'm currently looking into getting a career in Java and have decided to start by building an app. I have this code right here that I am using to trigger speech recognition.

public class MainActivity extends Activity implements OnClickListener{

private static final int VR_REQUEST = 999;
private ListView wordList;
private final String LOG_TAG = "SpeechRepeatActivity";  
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    Button speechBtn = (Button) findViewById(R.id.speech_btn);
    wordList = (ListView) findViewById (R.id.word_list);
    PackageManager packManager= getPackageManager();
    List<ResolveInfo> intActivities = packManager.queryIntentActivities
                    (new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);
    if (intActivities.size() !=0){
        speechBtn.setOnClickListener(this);
    } else {
        speechBtn.setEnabled(false);
        Toast.makeText(this,"Oops - Speech Recognition Not Supported!", 
                                             Toast.LENGTH_LONG).show();
        }       
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
    // Inflate the menu; this adds items to the action bar if it is present.
    getMenuInflater().inflate(R.menu.main, menu);
    return true;
}
public void onClick(View v){
   if (v.getId() == R.id.speech_btn) {
    listenToSpeech();
   }
}
    private void listenToSpeech() {
    //start the speech recognition intent passing required data
    Intent listenIntent = 
                     new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    //indicate package
    listenIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                                        getClass().getPackage().getName());
    //message to display while listening
    listenIntent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Say a word!");
    //set speech model
    listenIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, 
                                 RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    //specify number of results to retrieve
    listenIntent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 10);
    //start listening
    startActivityForResult(listenIntent, VR_REQUEST);
}
    @Override
    protected void onActivityResult(int requestCode, 
                                             int resultCode, Intent data) {
        //check speech recognition result 
        if (requestCode == VR_REQUEST && resultCode == RESULT_OK) {
    //store the returned word list as an ArrayList
    ArrayList<String> suggestedWords = data.
                     getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
    //set the retrieved list to display in the ListView 
            //using an ArrayAdapter
    wordList.setAdapter(new ArrayAdapter<String> 
                                       (this, R.layout.word, suggestedWords));
}
    //this detects which one the user clicks 
    wordList.setOnItemClickListener(new OnItemClickListener(){
        //click listener for items within list
        public void onItemClick(AdapterView<?> parent, 
                                           View view, int position, long id){
        //cast the clicked view to a TextView
        TextView wordView = (TextView) view;
        //retrieve the chosen word
        String wordChosen = (String) wordView.getText();
        //output for debugging
        Log.v(LOG_TAG, "chosen: " + wordChosen);
     }});
        super.onActivityResult(requestCode, resultCode, data);
  }
}

In this app the user presses a button and is shown the Google Voice Input screen, where you can tap a button (it actually starts automatically), speak, and it will stop listening and display the result. I don't want that window to pop up at all, though. Instead, just let the user click the button and speak, and let the app stop listening and display the text automatically (it already does that).

PLEASE! I understand that there are already answers on the forum showing how to do this; in fact a user named JEEZ posted some code right here: https://code.google.com/p/my-work-and-play-android/source/browse/trunk/android/speech/SpeechRecognizer/SpeechRecognizer/src/com/SpeechRecognizer/SpeechRecognizerActivity.java

I don't know if I understood where to put this in my project file. I AM A NOOB! If anyone could help clarify this I would GREATLY appreciate your help.

Here is my code:

package com.example.speechrecognizertest;

import android.os.Bundle;
import java.util.ArrayList;
import java.util.List;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ListView;
import android.widget.Toast;
import android.widget.TextView;
import android.app.Activity;
import android.view.Menu;

public class MainActivity extends Activity {

private static final int VR_REQUEST = 999;
public static final String TAG = null;
private ListView wordList;
private final String LOG_TAG = "SpeechRepeatActivity";
private SpeechRecognizer mSpeechRecognizer;
private Intent mSpeechRecognizerIntent; 
private boolean mIslistening; 

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    Button speechBtn = (Button) findViewById(R.id.speech_btn);
    wordList = (ListView) findViewById(R.id.word_list);
    PackageManager packManager = getPackageManager();
    List<ResolveInfo> intActivities = packManager.queryIntentActivities(
            new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);
    mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
    mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                                     RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                                     this.getPackageName());
    if (!mIslistening)
    {
        mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
    } else {
        speechBtn.setEnabled(false);
        Toast.makeText(this, "Oops - Speech Recognition Not Supported!",
                Toast.LENGTH_LONG).show();
    }
}


@Override
protected void onDestroy() {
    // TODO Auto-generated method stub
    super.onDestroy();
}


@Override
public boolean onCreateOptionsMenu(Menu menu) {
    // Inflate the menu; this adds items to the action bar if it is present.
    getMenuInflater().inflate(R.menu.main, menu);
    return true;
}



protected class SpeechRecognitionListener implements RecognitionListener
{

    @Override
    public void onBeginningOfSpeech()
    {               
        //Log.d(TAG, "onBeginingOfSpeech"); 
    }

    @Override
    public void onBufferReceived(byte[] buffer)
    {

    }

    @Override
    public void onEndOfSpeech()
    {
        //Log.d(TAG, "onEndOfSpeech");
     }

    @Override
    public void onError(int error)
    {
         mSpeechRecognizer.startListening(mSpeechRecognizerIntent);

        //Log.d(TAG, "error = " + error);
    }

    @Override
    public void onEvent(int eventType, Bundle params)
    {

    }

    @Override
    public void onPartialResults(Bundle partialResults)
    {

    }

    @Override
    public void onReadyForSpeech(Bundle params)
    {
        Log.d(TAG, "OnReadyForSpeech"); //$NON-NLS-1$
    }

    @Override
    public void onResults(Bundle results)
    {
        //Log.d(TAG, "onResults"); //$NON-NLS-1$
        ArrayList<String> suggestedWords =      results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        // matches are the return values of speech recognition engine
        // Use these values for whatever you wish to do

        wordList.setAdapter(new ArrayAdapter<String>(this, R.layout.word, suggestedWords));



    }

    @Override
    public void onRmsChanged(float rmsdB)
    {

    }

}

Solution

AndroidManifest.xml

Add the following permission:

<uses-permission android:name="android.permission.RECORD_AUDIO" />

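Note: RECORD_AUDIO is a "dangerous" permission, so on Android 6.0 (API 23) and later the manifest entry alone is not enough; the app also has to request it at runtime before startListening() will deliver any audio. A minimal sketch, assuming the support library (or AndroidX) compat classes are on the classpath; the request-code constant is just an example:

// imports assumed: android.Manifest, android.content.pm.PackageManager,
// plus ContextCompat/ActivityCompat from the support library
private static final int REQUEST_RECORD_AUDIO = 1; // arbitrary request code

private void requestAudioPermissionIfNeeded() {
    if (ContextCompat.checkSelfPermission(this, Manifest.permission.RECORD_AUDIO)
            != PackageManager.PERMISSION_GRANTED) {
        ActivityCompat.requestPermissions(this,
                new String[] { Manifest.permission.RECORD_AUDIO },
                REQUEST_RECORD_AUDIO);
    }
}

Call this early in onCreate and only start listening once the permission has actually been granted.
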
Class members

private SpeechRecognizer mSpeechRecognizer;
private Intent mSpeechRecognizerIntent; 
private boolean mIsListening; 

In onCreate

@Override
protected void onCreate(Bundle savedInstanceState)
{
    super.onCreate(savedInstanceState);
    .........
    .........
    mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
    mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                                     RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                                     this.getPackageName());


    SpeechRecognitionListener listener = new SpeechRecognitionListener();
    mSpeechRecognizer.setRecognitionListener(listener);

}   
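
One note on this step: the answer does not reuse the queryIntentActivities() check from the question; if you want a guard, SpeechRecognizer offers a direct availability check. A short sketch, assuming the speechBtn and Toast handling from the question's onCreate:

// inside onCreate, before createSpeechRecognizer(this)
if (!SpeechRecognizer.isRecognitionAvailable(this)) {
    speechBtn.setEnabled(false);
    Toast.makeText(this, "Oops - Speech Recognition Not Supported!",
            Toast.LENGTH_LONG).show();
    return;
}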

In your button listener, just use this code

if (!mIsListening)
{
    mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
}
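
Note that the answer never sets mIsListening anywhere, so as written the flag simply stays false. One possible way to wire the button (a sketch, not part of the original answer) is to set the flag when listening starts and clear it again in onResults()/onError() of the listener below:

speechBtn.setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        if (!mIsListening) {
            mIsListening = true;
            mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
        }
    }
});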

In onDestroy

if (mSpeechRecognizer != null)
{
        mSpeechRecognizer.destroy();
}

Inside your activity create the inner class

protected class SpeechRecognitionListener implements RecognitionListener
{

    @Override
    public void onBeginningOfSpeech()
    {               
        //Log.d(TAG, "onBeginingOfSpeech"); 
    }

    @Override
    public void onBufferReceived(byte[] buffer)
    {

    }

    @Override
    public void onEndOfSpeech()
    {
        //Log.d(TAG, "onEndOfSpeech");
     }

    @Override
    public void onError(int error)
    {
         mSpeechRecognizer.startListening(mSpeechRecognizerIntent);

        //Log.d(TAG, "error = " + error);
    }

    @Override
    public void onEvent(int eventType, Bundle params)
    {

    }

    @Override
    public void onPartialResults(Bundle partialResults)
    {

    }

    @Override
    public void onReadyForSpeech(Bundle params)
    {
        Log.d(TAG, "onReadyForSpeech"); //$NON-NLS-1$
    }

    @Override
    public void onResults(Bundle results)
    {
        //Log.d(TAG, "onResults"); //$NON-NLS-1$
        ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        // matches are the return values of speech recognition engine
        // Use these values for whatever you wish to do
    }

    @Override
    public void onRmsChanged(float rmsdB)
    {
    }
}
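
If you want onResults() to drive the same ListView as in the question, keep in mind that this inside the inner class refers to the listener, not the Activity, so the ArrayAdapter needs MainActivity.this (or another Context) instead. A hedged sketch of such an onResults(), reusing wordList and R.layout.word from the question and clearing the mIsListening flag discussed above:

    @Override
    public void onResults(Bundle results)
    {
        ArrayList<String> matches =
                results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        mIsListening = false; // let the button start a new recognition
        if (matches != null) {
            // MainActivity.this supplies the Activity context the adapter needs
            wordList.setAdapter(new ArrayAdapter<String>(
                    MainActivity.this, R.layout.word, matches));
        }
    }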

EDIT 2015-02-07: Incorporated code from the answers to this question by ZakiMak and Born To Win into the code in this answer to make this one more complete.
