M7350v1_en_gpl

This commit is contained in: T
2024-09-09 08:52:07 +00:00
commit f9cc65cfda
65988 changed files with 26357421 additions and 0 deletions


@@ -0,0 +1,87 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.os.Bundle;
/**
* Listener for speech recognition events, used with RecognitionService.
* This interface provides both the final recognition results and various
* intermediate events that can be used to show visual feedback to the user.
* {@hide}
*/
oneway interface IRecognitionListener {
/**
* Called when the endpointer is ready for the user to start speaking.
*
* @param params parameters set by the recognition service. Reserved for future use.
*/
void onReadyForSpeech(in Bundle params);
/**
* The user has started to speak.
*/
void onBeginningOfSpeech();
/**
* The sound level in the audio stream has changed.
*
* @param rmsdB the new RMS dB value
*/
void onRmsChanged(in float rmsdB);
/**
* More sound has been received.
*
* @param buffer the byte buffer containing a sequence of 16-bit shorts.
*/
void onBufferReceived(in byte[] buffer);
/**
* Called after the user stops speaking.
*/
void onEndOfSpeech();
/**
* A network or recognition error occurred.
*
* @param error the error code, as defined in {@link SpeechRecognizer}
*/
void onError(in int error);
/**
* Called when recognition results are ready.
*
* @param results a Bundle containing the most likely results (N-best list).
*/
void onResults(in Bundle results);
/**
* Called when recognition partial results are ready.
*
* @param results a Bundle containing the current most likely result.
*/
void onPartialResults(in Bundle results);
/**
* Reserved for adding future events.
*
* @param eventType the type of the occurred event
* @param params a Bundle containing the passed parameters
*/
void onEvent(in int eventType, in Bundle params);
}


@@ -0,0 +1,60 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.os.Bundle;
import android.content.Intent;
import android.speech.IRecognitionListener;
/**
* A Service interface to speech recognition. Call startListening when
* you want to begin capturing audio; RecognitionService will automatically
* determine when the user has finished speaking, stream the audio to the
* recognition servers, and notify you when results are ready. In most cases,
* this class should not be used directly; instead, use {@link SpeechRecognizer} to
* access the recognition service.
* {@hide}
*/
oneway interface IRecognitionService {
/**
* Starts listening for speech. Please note that the recognition service supports
* only one listener; therefore, if this method is called from two different threads,
* only the most recent caller will receive the notifications.
*
* @param recognizerIntent the intent from which the invocation occurred. Additionally,
* this intent can contain extra parameters to manipulate the behavior of the recognition
* client. For more information see {@link RecognizerIntent}.
* @param listener the listener to receive callbacks; must be non-null
*/
void startListening(in Intent recognizerIntent, in IRecognitionListener listener);
/**
* Stops listening for speech. Speech captured so far will be recognized as
* if the user had stopped speaking at this point. This method has no effect unless it
* is called while speech is being captured.
*
* @param listener the listener to receive callbacks; must be non-null
*/
void stopListening(in IRecognitionListener listener);
/**
* Cancels the speech recognition.
*
* @param listener the listener to receive callbacks; must be non-null
*/
void cancel(in IRecognitionListener listener);
}


@@ -0,0 +1,98 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.content.Intent;
import android.os.Bundle;
/**
* Used for receiving notifications from the SpeechRecognizer when recognition-related
* events occur. All callbacks are executed on the application main thread.
*/
public interface RecognitionListener {
/**
* Called when the endpointer is ready for the user to start speaking.
*
* @param params parameters set by the recognition service. Reserved for future use.
*/
void onReadyForSpeech(Bundle params);
/**
* The user has started to speak.
*/
void onBeginningOfSpeech();
/**
* The sound level in the audio stream has changed. There is no guarantee that this method will
* be called.
*
* @param rmsdB the new RMS dB value
*/
void onRmsChanged(float rmsdB);
/**
* More sound has been received. The purpose of this function is to allow giving feedback to the
* user regarding the captured audio. There is no guarantee that this method will be called.
*
* @param buffer a buffer containing a sequence of big-endian 16-bit integers representing a
* single channel audio stream. The sample rate is implementation dependent.
*/
void onBufferReceived(byte[] buffer);
/**
* Called after the user stops speaking.
*/
void onEndOfSpeech();
/**
* A network or recognition error occurred.
*
* @param error the error code, as defined in {@link SpeechRecognizer}
*/
void onError(int error);
/**
* Called when recognition results are ready.
*
* @param results the recognition results. To retrieve the results in {@code
* ArrayList<String>} format use {@link Bundle#getStringArrayList(String)} with
* {@link SpeechRecognizer#RESULTS_RECOGNITION} as a parameter
*/
void onResults(Bundle results);
/**
* Called when partial recognition results are available. The callback might be called at any
* time between {@link #onBeginningOfSpeech()} and {@link #onResults(Bundle)} when partial
* results are ready. This method may be called zero, one or multiple times for each call to
* {@link SpeechRecognizer#startListening(Intent)}, depending on the speech recognition
* service implementation. To request partial results, use
* {@link RecognizerIntent#EXTRA_PARTIAL_RESULTS}.
*
* @param partialResults the returned results. To retrieve the results in
* ArrayList&lt;String&gt; format use {@link Bundle#getStringArrayList(String)} with
* {@link SpeechRecognizer#RESULTS_RECOGNITION} as a parameter
*/
void onPartialResults(Bundle partialResults);
/**
* Reserved for adding future events.
*
* @param eventType the type of the occurred event
* @param params a Bundle containing the passed parameters
*/
void onEvent(int eventType, Bundle params);
}
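
A minimal client-side sketch of the RecognitionListener contract above; the class name and logging are illustrative, and only the result-handling callbacks are fleshed out:

import android.os.Bundle;
import android.speech.RecognitionListener;
import android.speech.SpeechRecognizer;
import android.util.Log;
import java.util.ArrayList;

/** Illustrative listener that logs the N-best recognition results. */
class LoggingRecognitionListener implements RecognitionListener {
    private static final String TAG = "LoggingListener";
    @Override public void onReadyForSpeech(Bundle params) { Log.d(TAG, "ready"); }
    @Override public void onBeginningOfSpeech() { Log.d(TAG, "speech started"); }
    @Override public void onRmsChanged(float rmsdB) { /* drive a level meter */ }
    @Override public void onBufferReceived(byte[] buffer) { /* optional audio feedback */ }
    @Override public void onEndOfSpeech() { Log.d(TAG, "speech ended"); }
    @Override public void onError(int error) {
        Log.e(TAG, "error code " + error); // codes are defined in SpeechRecognizer
    }
    @Override public void onResults(Bundle results) {
        // The N-best list, most likely candidate first.
        ArrayList<String> nBest =
                results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        Log.d(TAG, "results: " + nBest);
    }
    @Override public void onPartialResults(Bundle partialResults) {
        Log.d(TAG, "partial: "
                + partialResults.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION));
    }
    @Override public void onEvent(int eventType, Bundle params) { /* reserved */ }
}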


@@ -0,0 +1,343 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package android.speech;
import android.annotation.SdkConstant;
import android.annotation.SdkConstant.SdkConstantType;
import android.app.Service;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.os.Bundle;
import android.os.Handler;
import android.os.IBinder;
import android.os.Message;
import android.os.RemoteException;
import android.util.Log;
/**
* This class provides a base class for recognition service implementations. This class should be
* extended only if you wish to implement a new speech recognizer. Please note that the
* implementation of this service is stateless.
*/
public abstract class RecognitionService extends Service {
/**
* The {@link Intent} that must be declared as handled by the service.
*/
@SdkConstant(SdkConstantType.SERVICE_ACTION)
public static final String SERVICE_INTERFACE = "android.speech.RecognitionService";
/**
* Name under which a RecognitionService component publishes information about itself.
* This meta-data should reference an XML resource containing a
* <code>&lt;{@link android.R.styleable#RecognitionService recognition-service}&gt;</code> tag.
*/
public static final String SERVICE_META_DATA = "android.speech";
/** Log messages identifier */
private static final String TAG = "RecognitionService";
/** Debugging flag */
private static final boolean DBG = false;
/** Binder of the recognition service */
private RecognitionServiceBinder mBinder = new RecognitionServiceBinder(this);
/**
* The current callback of an application that invoked the
* {@link RecognitionService#onStartListening(Intent, Callback)} method
*/
private Callback mCurrentCallback = null;
private static final int MSG_START_LISTENING = 1;
private static final int MSG_STOP_LISTENING = 2;
private static final int MSG_CANCEL = 3;
private static final int MSG_RESET = 4;
private final Handler mHandler = new Handler() {
@Override
public void handleMessage(Message msg) {
switch (msg.what) {
case MSG_START_LISTENING:
StartListeningArgs args = (StartListeningArgs) msg.obj;
dispatchStartListening(args.mIntent, args.mListener);
break;
case MSG_STOP_LISTENING:
dispatchStopListening((IRecognitionListener) msg.obj);
break;
case MSG_CANCEL:
dispatchCancel((IRecognitionListener) msg.obj);
break;
case MSG_RESET:
dispatchClearCallback();
break;
}
}
};
private void dispatchStartListening(Intent intent, IRecognitionListener listener) {
if (mCurrentCallback == null) {
if (DBG) Log.d(TAG, "created new mCurrentCallback, listener = " + listener.asBinder());
mCurrentCallback = new Callback(listener);
RecognitionService.this.onStartListening(intent, mCurrentCallback);
} else {
try {
listener.onError(SpeechRecognizer.ERROR_RECOGNIZER_BUSY);
} catch (RemoteException e) {
Log.d(TAG, "onError call from startListening failed");
}
Log.i(TAG, "concurrent startListening received - ignoring this call");
}
}
private void dispatchStopListening(IRecognitionListener listener) {
try {
if (mCurrentCallback == null) {
listener.onError(SpeechRecognizer.ERROR_CLIENT);
Log.w(TAG, "stopListening called with no preceding startListening - ignoring");
} else if (mCurrentCallback.mListener.asBinder() != listener.asBinder()) {
listener.onError(SpeechRecognizer.ERROR_RECOGNIZER_BUSY);
Log.w(TAG, "stopListening called by other caller than startListening - ignoring");
} else { // the correct state
RecognitionService.this.onStopListening(mCurrentCallback);
}
} catch (RemoteException e) { // occurs if onError fails
Log.d(TAG, "onError call from stopListening failed");
}
}
private void dispatchCancel(IRecognitionListener listener) {
if (mCurrentCallback == null) {
if (DBG) Log.d(TAG, "cancel called with no preceding startListening - ignoring");
} else if (mCurrentCallback.mListener.asBinder() != listener.asBinder()) {
Log.w(TAG, "cancel called by client who did not call startListening - ignoring");
} else { // the correct state
RecognitionService.this.onCancel(mCurrentCallback);
mCurrentCallback = null;
if (DBG) Log.d(TAG, "canceling - setting mCurrentCallback to null");
}
}
private void dispatchClearCallback() {
mCurrentCallback = null;
}
private class StartListeningArgs {
public final Intent mIntent;
public final IRecognitionListener mListener;
public StartListeningArgs(Intent intent, IRecognitionListener listener) {
this.mIntent = intent;
this.mListener = listener;
}
}
/**
* Checks whether the caller has sufficient permissions
*
* @param listener to send the error message to in case of error
* @return {@code true} if the caller has enough permissions, {@code false} otherwise
*/
private boolean checkPermissions(IRecognitionListener listener) {
if (DBG) Log.d(TAG, "checkPermissions");
if (RecognitionService.this.checkCallingOrSelfPermission(android.Manifest.permission.
RECORD_AUDIO) == PackageManager.PERMISSION_GRANTED) {
return true;
}
try {
Log.e(TAG, "call for recognition service without RECORD_AUDIO permissions");
listener.onError(SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS);
} catch (RemoteException re) {
Log.e(TAG, "sending ERROR_INSUFFICIENT_PERMISSIONS message failed", re);
}
return false;
}
/**
* Notifies the service that it should start listening for speech.
*
* @param recognizerIntent contains parameters for the recognition to be performed. The intent
* may also contain optional extras, see {@link RecognizerIntent}. If these values are
* not set explicitly, default values should be used by the recognizer.
* @param listener that will receive the service's callbacks
*/
protected abstract void onStartListening(Intent recognizerIntent, Callback listener);
/**
* Notifies the service that it should cancel the speech recognition.
*/
protected abstract void onCancel(Callback listener);
/**
* Notifies the service that it should stop listening for speech. Speech captured so far should
* be recognized as if the user had stopped speaking at this point. This method is called
* only if the client explicitly requests it by calling {@link SpeechRecognizer#stopListening()}.
*/
protected abstract void onStopListening(Callback listener);
@Override
public final IBinder onBind(final Intent intent) {
if (DBG) Log.d(TAG, "onBind, intent=" + intent);
return mBinder;
}
@Override
public void onDestroy() {
if (DBG) Log.d(TAG, "onDestroy");
mCurrentCallback = null;
mBinder.clearReference();
super.onDestroy();
}
/**
* This class receives callbacks from the speech recognition service and forwards them to the
* user. An instance of this class is passed to the
* {@link RecognitionService#onStartListening(Intent, Callback)} method. Recognizers may call
* these methods on any thread.
*/
public class Callback {
private final IRecognitionListener mListener;
private Callback(IRecognitionListener listener) {
mListener = listener;
}
/**
* The service should call this method when the user has started to speak.
*/
public void beginningOfSpeech() throws RemoteException {
if (DBG) Log.d(TAG, "beginningOfSpeech");
mListener.onBeginningOfSpeech();
}
/**
* The service should call this method when sound has been received. The purpose of this
* function is to allow giving feedback to the user regarding the captured audio.
*
* @param buffer a buffer containing a sequence of big-endian 16-bit integers representing a
* single channel audio stream. The sample rate is implementation dependent.
*/
public void bufferReceived(byte[] buffer) throws RemoteException {
mListener.onBufferReceived(buffer);
}
/**
* The service should call this method after the user stops speaking.
*/
public void endOfSpeech() throws RemoteException {
mListener.onEndOfSpeech();
}
/**
* The service should call this method when a network or recognition error occurred.
*
* @param error the error code, as defined in {@link SpeechRecognizer}
*/
public void error(int error) throws RemoteException {
Message.obtain(mHandler, MSG_RESET).sendToTarget();
mListener.onError(error);
}
/**
* The service should call this method when partial recognition results are available. This
* method can be called at any time between {@link #beginningOfSpeech()} and
* {@link #results(Bundle)} when partial results are ready. This method may be called zero,
* one or multiple times for each call to {@link SpeechRecognizer#startListening(Intent)},
* depending on the speech recognition service implementation.
*
* @param partialResults the returned results. To retrieve the results in
* ArrayList&lt;String&gt; format use {@link Bundle#getStringArrayList(String)} with
* {@link SpeechRecognizer#RESULTS_RECOGNITION} as a parameter
*/
public void partialResults(Bundle partialResults) throws RemoteException {
mListener.onPartialResults(partialResults);
}
/**
* The service should call this method when the endpointer is ready for the user to start
* speaking.
*
* @param params parameters set by the recognition service. Reserved for future use.
*/
public void readyForSpeech(Bundle params) throws RemoteException {
mListener.onReadyForSpeech(params);
}
/**
* The service should call this method when recognition results are ready.
*
* @param results the recognition results. To retrieve the results in {@code
* ArrayList&lt;String&gt;} format use {@link Bundle#getStringArrayList(String)} with
* {@link SpeechRecognizer#RESULTS_RECOGNITION} as a parameter
*/
public void results(Bundle results) throws RemoteException {
Message.obtain(mHandler, MSG_RESET).sendToTarget();
mListener.onResults(results);
}
/**
* The service should call this method when the sound level in the audio stream has changed.
* There is no guarantee that this method will be called.
*
* @param rmsdB the new RMS dB value
*/
public void rmsChanged(float rmsdB) throws RemoteException {
mListener.onRmsChanged(rmsdB);
}
}
/** Binder of the recognition service */
private static class RecognitionServiceBinder extends IRecognitionService.Stub {
private RecognitionService mInternalService;
public RecognitionServiceBinder(RecognitionService service) {
mInternalService = service;
}
public void startListening(Intent recognizerIntent, IRecognitionListener listener) {
if (DBG) Log.d(TAG, "startListening called by:" + listener.asBinder());
if (mInternalService != null && mInternalService.checkPermissions(listener)) {
mInternalService.mHandler.sendMessage(Message.obtain(mInternalService.mHandler,
MSG_START_LISTENING, mInternalService.new StartListeningArgs(
recognizerIntent, listener)));
}
}
public void stopListening(IRecognitionListener listener) {
if (DBG) Log.d(TAG, "stopListening called by:" + listener.asBinder());
if (mInternalService != null && mInternalService.checkPermissions(listener)) {
mInternalService.mHandler.sendMessage(Message.obtain(mInternalService.mHandler,
MSG_STOP_LISTENING, listener));
}
}
public void cancel(IRecognitionListener listener) {
if (DBG) Log.d(TAG, "cancel called by:" + listener.asBinder());
if (mInternalService != null && mInternalService.checkPermissions(listener)) {
mInternalService.mHandler.sendMessage(Message.obtain(mInternalService.mHandler,
MSG_CANCEL, listener));
}
}
public void clearReference() {
mInternalService = null;
}
}
}
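
To make the service contract concrete, a skeletal subclass sketched under stated assumptions: the recognition engine hook-up is omitted, and everything except the three overridden methods and the Callback calls is hypothetical. Such a subclass would also need to be declared in the manifest with an intent filter for SERVICE_INTERFACE.

import android.content.Intent;
import android.os.Bundle;
import android.os.RemoteException;
import android.speech.RecognitionService;
import android.speech.SpeechRecognizer;
import java.util.ArrayList;

/** Hypothetical skeleton of a recognizer built on RecognitionService. */
public class StubRecognitionService extends RecognitionService {
    @Override
    protected void onStartListening(Intent recognizerIntent, Callback listener) {
        try {
            listener.readyForSpeech(new Bundle());
            // A real engine would start audio capture here and call
            // beginningOfSpeech(), rmsChanged(), bufferReceived() as it runs.
        } catch (RemoteException e) {
            // Client process died; nothing more to deliver.
        }
    }

    @Override
    protected void onStopListening(Callback listener) {
        // Finish decoding the captured audio, then deliver results.
        deliverPlaceholderResult(listener);
    }

    @Override
    protected void onCancel(Callback listener) {
        // Abandon any in-flight recognition; no further callbacks are expected.
    }

    private void deliverPlaceholderResult(Callback listener) {
        try {
            ArrayList<String> nBest = new ArrayList<String>();
            nBest.add("hello world"); // placeholder recognition result
            Bundle results = new Bundle();
            results.putStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION, nBest);
            listener.results(results); // also clears the service's current callback
        } catch (RemoteException e) {
            // Client process died; nothing more to deliver.
        }
    }
}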


@@ -0,0 +1,325 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import java.util.ArrayList;
import android.app.Activity;
import android.content.ActivityNotFoundException;
import android.content.BroadcastReceiver;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.os.Bundle;
/**
* Constants for supporting speech recognition through starting an {@link Intent}
*/
public class RecognizerIntent {
/**
* The extra key used in an intent to the speech recognizer for voice search. Not
* generally to be used by developers. The system search dialog uses this, for example,
* to set a calling package for identification by a voice search API. If this extra
* is set by anyone but the system process, it should be overridden by the voice search
* implementation.
*/
public final static String EXTRA_CALLING_PACKAGE = "calling_package";
private RecognizerIntent() {
// Not for instantiating.
}
/**
* Starts an activity that will prompt the user for speech and sends it through a
* speech recognizer. The results will be returned via activity results (in
* {@link Activity#onActivityResult}, if you start the intent using
* {@link Activity#startActivityForResult(Intent, int)}), or forwarded via a PendingIntent
* if one is provided.
*
* <p>Starting this intent with just {@link Activity#startActivity(Intent)} is not supported.
* You must either use {@link Activity#startActivityForResult(Intent, int)}, or provide a
* PendingIntent, to receive recognition results.
*
* <p>Required extras:
* <ul>
* <li>{@link #EXTRA_LANGUAGE_MODEL}
* </ul>
*
* <p>Optional extras:
* <ul>
* <li>{@link #EXTRA_PROMPT}
* <li>{@link #EXTRA_LANGUAGE}
* <li>{@link #EXTRA_MAX_RESULTS}
* <li>{@link #EXTRA_RESULTS_PENDINGINTENT}
* <li>{@link #EXTRA_RESULTS_PENDINGINTENT_BUNDLE}
* </ul>
*
* <p> Result extras (returned in the result, not to be specified in the request):
* <ul>
* <li>{@link #EXTRA_RESULTS}
* </ul>
*
* <p>NOTE: There may not be any applications installed to handle this action, so you should
* make sure to catch {@link ActivityNotFoundException}.
*/
public static final String ACTION_RECOGNIZE_SPEECH = "android.speech.action.RECOGNIZE_SPEECH";
/**
* Starts an activity that will prompt the user for speech, sends it through a
* speech recognizer, and invokes and displays a web search result.
*
* <p>Required extras:
* <ul>
* <li>{@link #EXTRA_LANGUAGE_MODEL}
* </ul>
*
* <p>Optional extras:
* <ul>
* <li>{@link #EXTRA_PROMPT}
* <li>{@link #EXTRA_LANGUAGE}
* <li>{@link #EXTRA_MAX_RESULTS}
* <li>{@link #EXTRA_PARTIAL_RESULTS}
* </ul>
*
* <p> Result extras (returned in the result, not to be specified in the request):
* <ul>
* <li>{@link #EXTRA_RESULTS}
* </ul>
*
* <p>NOTE: There may not be any applications installed to handle this action, so you should
* make sure to catch {@link ActivityNotFoundException}.
*/
public static final String ACTION_WEB_SEARCH = "android.speech.action.WEB_SEARCH";
/**
* The minimum length of an utterance. We will not stop recording before this amount of time.
*
* Note that it is extremely rare you'd want to specify this value in an intent. If you don't
* have a very good reason to change these, you should leave them as they are. Note also that
* certain values may cause undesired or unexpected results - use judiciously! Additionally,
* depending on the recognizer implementation, these values may have no effect.
*/
public static final String EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS =
"android.speech.extras.SPEECH_INPUT_MINIMUM_LENGTH_MILLIS";
/**
* The amount of time that it should take after we stop hearing speech to consider the input
* complete.
*
* Note that it is extremely rare you'd want to specify this value in an intent. If
* you don't have a very good reason to change these, you should leave them as they are. Note
* also that certain values may cause undesired or unexpected results - use judiciously!
* Additionally, depending on the recognizer implementation, these values may have no effect.
*/
public static final String EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS =
"android.speech.extras.SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS";
/**
* The amount of time that it should take after we stop hearing speech to consider the input
* possibly complete. This is used to prevent the endpointer cutting off during very short
* mid-speech pauses.
*
* Note that it is extremely rare you'd want to specify this value in an intent. If
* you don't have a very good reason to change these, you should leave them as they are. Note
* also that certain values may cause undesired or unexpected results - use judiciously!
* Additionally, depending on the recognizer implementation, these values may have no effect.
*/
public static final String EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS =
"android.speech.extras.SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS";
/**
* Informs the recognizer which speech model to prefer when performing
* {@link #ACTION_RECOGNIZE_SPEECH}. The recognizer uses this
* information to fine tune the results. This extra is required. Activities implementing
* {@link #ACTION_RECOGNIZE_SPEECH} may interpret the values as they see fit.
*
* @see #LANGUAGE_MODEL_FREE_FORM
* @see #LANGUAGE_MODEL_WEB_SEARCH
*/
public static final String EXTRA_LANGUAGE_MODEL = "android.speech.extra.LANGUAGE_MODEL";
/**
* Use a language model based on free-form speech recognition. This is a value to use for
* {@link #EXTRA_LANGUAGE_MODEL}.
* @see #EXTRA_LANGUAGE_MODEL
*/
public static final String LANGUAGE_MODEL_FREE_FORM = "free_form";
/**
* Use a language model based on web search terms. This is a value to use for
* {@link #EXTRA_LANGUAGE_MODEL}.
* @see #EXTRA_LANGUAGE_MODEL
*/
public static final String LANGUAGE_MODEL_WEB_SEARCH = "web_search";
/** Optional text prompt to show to the user when asking them to speak. */
public static final String EXTRA_PROMPT = "android.speech.extra.PROMPT";
/**
* Optional IETF language tag (as defined by BCP 47), for example "en-US". This tag informs the
* recognizer to perform speech recognition in a language different than the one set in the
* {@link java.util.Locale#getDefault()}.
*/
public static final String EXTRA_LANGUAGE = "android.speech.extra.LANGUAGE";
/**
* Optional limit on the maximum number of results to return. If omitted the recognizer
* will choose how many results to return. Must be an integer.
*/
public static final String EXTRA_MAX_RESULTS = "android.speech.extra.MAX_RESULTS";
/**
* Optional boolean to indicate whether partial results should be returned by the recognizer
* as the user speaks (default is false). The server may ignore a request for partial
* results in some or all cases.
*/
public static final String EXTRA_PARTIAL_RESULTS = "android.speech.extra.PARTIAL_RESULTS";
/**
* When the intent is {@link #ACTION_RECOGNIZE_SPEECH}, the speech input activity will
* return results to you via the activity results mechanism. Alternatively, if you use this
* extra to supply a PendingIntent, the results will be added to its bundle and the
* PendingIntent will be sent to its target.
*/
public static final String EXTRA_RESULTS_PENDINGINTENT =
"android.speech.extra.RESULTS_PENDINGINTENT";
/**
* If you use {@link #EXTRA_RESULTS_PENDINGINTENT} to supply a forwarding intent, you can
* also use this extra to supply additional extras for the final intent. The search results
* will be added to this bundle, and the combined bundle will be sent to the target.
*/
public static final String EXTRA_RESULTS_PENDINGINTENT_BUNDLE =
"android.speech.extra.RESULTS_PENDINGINTENT_BUNDLE";
/** Result code returned when no matches are found for the given speech */
public static final int RESULT_NO_MATCH = Activity.RESULT_FIRST_USER;
/** Result code returned when there is a generic client error */
public static final int RESULT_CLIENT_ERROR = Activity.RESULT_FIRST_USER + 1;
/** Result code returned when the recognition server returns an error */
public static final int RESULT_SERVER_ERROR = Activity.RESULT_FIRST_USER + 2;
/** Result code returned when a network error was encountered */
public static final int RESULT_NETWORK_ERROR = Activity.RESULT_FIRST_USER + 3;
/** Result code returned when an audio error was encountered */
public static final int RESULT_AUDIO_ERROR = Activity.RESULT_FIRST_USER + 4;
/**
* An ArrayList&lt;String&gt; of the recognition results when performing
* {@link #ACTION_RECOGNIZE_SPEECH}. Returned in the results; not to be specified in the
* recognition request. Only present when {@link Activity#RESULT_OK} is returned in
* an activity result. In a PendingIntent, the lack of this extra indicates failure.
*/
public static final String EXTRA_RESULTS = "android.speech.extra.RESULTS";
/**
* Returns the broadcast intent to fire with
* {@link Context#sendOrderedBroadcast(Intent, String, BroadcastReceiver, android.os.Handler, int, String, Bundle)}
* to receive details from the package that implements voice search.
* <p>
* This is based on the value specified by the voice search {@link Activity} in
* {@link #DETAILS_META_DATA}, and if this is not specified, will return null. Also if there
* is no chosen default to resolve for {@link #ACTION_WEB_SEARCH}, this will return null.
* <p>
* If an intent is returned and is fired, a {@link Bundle} of extras will be returned to the
* provided result receiver, and should ideally contain values for
* {@link #EXTRA_LANGUAGE_PREFERENCE} and {@link #EXTRA_SUPPORTED_LANGUAGES}.
* <p>
* (Whether these are actually provided is up to the particular implementation. It is
* recommended that {@link Activity}s implementing {@link #ACTION_WEB_SEARCH} provide this
* information, but it is not required.)
*
* @param context a context object
* @return the broadcast intent to fire or null if not available
*/
public static final Intent getVoiceDetailsIntent(Context context) {
Intent voiceSearchIntent = new Intent(ACTION_WEB_SEARCH);
ResolveInfo ri = context.getPackageManager().resolveActivity(
voiceSearchIntent, PackageManager.GET_META_DATA);
if (ri == null || ri.activityInfo == null || ri.activityInfo.metaData == null) return null;
String className = ri.activityInfo.metaData.getString(DETAILS_META_DATA);
if (className == null) return null;
Intent detailsIntent = new Intent(ACTION_GET_LANGUAGE_DETAILS);
detailsIntent.setComponent(new ComponentName(ri.activityInfo.packageName, className));
return detailsIntent;
}
/**
* Meta-data name that an {@link Activity} implementing {@link #ACTION_WEB_SEARCH} can
* use to expose the class name of a {@link BroadcastReceiver} that can respond to requests
* for more information, from any of the broadcast intents specified in this class.
* <p>
* Broadcast intents can be directed to the class name specified in the meta-data by creating
* an {@link Intent}, setting the component with
* {@link Intent#setComponent(android.content.ComponentName)}, and using
* {@link Context#sendOrderedBroadcast(Intent, String, BroadcastReceiver, android.os.Handler, int, String, android.os.Bundle)}
* with another {@link BroadcastReceiver} which can receive the results.
* <p>
* The {@link #getVoiceDetailsIntent(Context)} method is provided as a convenience to create
* a broadcast intent based on the value of this meta-data, if available.
* <p>
* This is optional and not all {@link Activity}s which implement {@link #ACTION_WEB_SEARCH}
* are required to implement this. Thus retrieving this meta-data may return null.
*/
public static final String DETAILS_META_DATA = "android.speech.DETAILS";
/**
* A broadcast intent which can be fired to the {@link BroadcastReceiver} component specified
* in the meta-data defined in the {@link #DETAILS_META_DATA} meta-data of an
* {@link Activity} satisfying {@link #ACTION_WEB_SEARCH}.
* <p>
* When fired with
* {@link Context#sendOrderedBroadcast(Intent, String, BroadcastReceiver, android.os.Handler, int, String, android.os.Bundle)},
* a {@link Bundle} of extras will be returned to the provided result receiver, and should
* ideally contain values for {@link #EXTRA_LANGUAGE_PREFERENCE} and
* {@link #EXTRA_SUPPORTED_LANGUAGES}.
* <p>
* (Whether these are actually provided is up to the particular implementation. It is
* recommended that {@link Activity}s implementing {@link #ACTION_WEB_SEARCH} provide this
* information, but it is not required.)
*/
public static final String ACTION_GET_LANGUAGE_DETAILS =
"android.speech.action.GET_LANGUAGE_DETAILS";
/**
* Specify this boolean extra in a broadcast of {@link #ACTION_GET_LANGUAGE_DETAILS} to
* indicate that only the current language preference is needed in the response. This
* avoids any additional computation if all you need is {@link #EXTRA_LANGUAGE_PREFERENCE}
* in the response.
*/
public static final String EXTRA_ONLY_RETURN_LANGUAGE_PREFERENCE =
"android.speech.extra.ONLY_RETURN_LANGUAGE_PREFERENCE";
/**
* The key to the extra in the {@link Bundle} returned by {@link #ACTION_GET_LANGUAGE_DETAILS}
* which is a {@link String} that represents the current language preference this user has
* specified - a locale string like "en-US".
*/
public static final String EXTRA_LANGUAGE_PREFERENCE =
"android.speech.extra.LANGUAGE_PREFERENCE";
/**
* The key to the extra in the {@link Bundle} returned by {@link #ACTION_GET_LANGUAGE_DETAILS}
* which is an {@link ArrayList} of {@link String}s that represents the languages supported by
* this implementation of voice recognition - a list of strings like "en-US", "cmn-Hans-CN",
* etc.
*/
public static final String EXTRA_SUPPORTED_LANGUAGES =
"android.speech.extra.SUPPORTED_LANGUAGES";
}
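
The activity-result flow described under ACTION_RECOGNIZE_SPEECH might be exercised roughly as follows; the request code and prompt string are arbitrary illustrative choices:

import android.app.Activity;
import android.content.ActivityNotFoundException;
import android.content.Intent;
import android.speech.RecognizerIntent;
import java.util.ArrayList;

public class VoiceInputActivity extends Activity {
    private static final int REQUEST_SPEECH = 1234; // arbitrary request code

    private void promptForSpeech() {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        // EXTRA_LANGUAGE_MODEL is the one required extra.
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        intent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Speak now");
        intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 5);
        try {
            startActivityForResult(intent, REQUEST_SPEECH);
        } catch (ActivityNotFoundException e) {
            // No application handles ACTION_RECOGNIZE_SPEECH; degrade gracefully.
        }
    }

    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        if (requestCode == REQUEST_SPEECH && resultCode == RESULT_OK) {
            // Ordered from highest to lowest confidence.
            ArrayList<String> matches =
                    data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
        }
        super.onActivityResult(requestCode, resultCode, data);
    }
}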


@@ -0,0 +1,133 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.os.Bundle;
import java.util.ArrayList;
/**
* Constants for intents related to showing speech recognition results.
*
* These constants should not be needed for normal utilization of speech recognition. They
* are needed only if you want to trigger a view of voice search results in your
* application, or to offer a different view for voice search results within your
* application.
*
* The standard behavior here for someone receiving an {@link #ACTION_VOICE_SEARCH_RESULTS} is to
* first retrieve the list of {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS}, and use any provided
* HTML for that result in {@link #EXTRA_VOICE_SEARCH_RESULT_HTML}, if available, to display
* the search results. If that is not available, then the corresponding url for that result in
* {@link #EXTRA_VOICE_SEARCH_RESULT_URLS} should be used. And if even that is not available,
* then a search url should be constructed from the actual recognition result string.
*/
public class RecognizerResultsIntent {
private RecognizerResultsIntent() {
// Not for instantiating.
}
/**
* Intent that can be sent by implementations of voice search to display the results of
* a search in, for example, a web browser.
*
* This intent should always be accompanied by at least
* {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS}, and optionally but recommended,
* {@link #EXTRA_VOICE_SEARCH_RESULT_URLS}, and sometimes
* {@link #EXTRA_VOICE_SEARCH_RESULT_HTML} and
* {@link #EXTRA_VOICE_SEARCH_RESULT_HTML_BASE_URLS}.
*
* These are parallel arrays, where a recognition result string at index N of
* {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS} should be accompanied by a url to use for
* searching based on that string at index N of {@link #EXTRA_VOICE_SEARCH_RESULT_URLS},
* and, possibly, the full html to display for that result at index N of
* {@link #EXTRA_VOICE_SEARCH_RESULT_HTML}. If full html is provided, a base url (or
* list of base urls) should be provided with {@link #EXTRA_VOICE_SEARCH_RESULT_HTML_BASE_URLS}.
*/
public static final String ACTION_VOICE_SEARCH_RESULTS =
"android.speech.action.VOICE_SEARCH_RESULTS";
/**
* The key to an extra {@link ArrayList} of {@link String}s that contains the list of
* recognition alternates from voice search, in order from highest to lowest confidence.
*/
public static final String EXTRA_VOICE_SEARCH_RESULT_STRINGS =
"android.speech.extras.VOICE_SEARCH_RESULT_STRINGS";
/**
* The key to an extra {@link ArrayList} of {@link String}s that contains the search urls
* to use, if available, for the recognition alternates provided in
* {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS}. This list should always be the same size as the
* one provided in {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS} - if a result cannot provide a
* search url, that entry in this ArrayList should be <code>null</code>, and the implementor of
* {@link #ACTION_VOICE_SEARCH_RESULTS} should execute a search of its own choosing,
* based on the recognition result string.
*/
public static final String EXTRA_VOICE_SEARCH_RESULT_URLS =
"android.speech.extras.VOICE_SEARCH_RESULT_URLS";
/**
* The key to an extra {@link ArrayList} of {@link String}s that contains the html content to
* use, if available, for the recognition alternates provided in
* {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS}. This list should always be the same size as the
* one provided in {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS} - if a result cannot provide
* html, that entry in this list should be <code>null</code>, and the implementor of
* {@link #ACTION_VOICE_SEARCH_RESULTS} should back off to the corresponding url provided in
* {@link #EXTRA_VOICE_SEARCH_RESULT_URLS}, if available, or else should execute a search of
* its own choosing, based on the recognition result string.
*
* Currently this html content should be expected in the form of a uri with scheme
* {@link #URI_SCHEME_INLINE} for the Browser. In the future this may change to a "content://"
* uri or some other identifier. Anyone who reads this extra should confirm that a result is
* in fact an "inline:" uri and back off to the urls or strings gracefully if it is not, thus
* maintaining future backwards compatibility if this changes.
*/
public static final String EXTRA_VOICE_SEARCH_RESULT_HTML =
"android.speech.extras.VOICE_SEARCH_RESULT_HTML";
/**
* The key to an extra {@link ArrayList} of {@link String}s that contains the base url to
* assume when interpreting html provided in {@link #EXTRA_VOICE_SEARCH_RESULT_HTML}.
*
* A list of size 1 may be provided to apply the same base url to all html results.
* A list of the same size as {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS} may be provided
* to apply different base urls to each different html result in the
* {@link #EXTRA_VOICE_SEARCH_RESULT_HTML} list.
*/
public static final String EXTRA_VOICE_SEARCH_RESULT_HTML_BASE_URLS =
"android.speech.extras.VOICE_SEARCH_RESULT_HTML_BASE_URLS";
/**
* The key to an extra {@link ArrayList} of {@link Bundle}s that contains key/value pairs.
* All the values and the keys are {@link String}s. Each key/value pair represents an extra HTTP
* header. The keys can't be the standard HTTP headers as they are set by the WebView.
*
* A list of size 1 may be provided to apply the same HTTP headers to all web results. A
* list of the same size as {@link #EXTRA_VOICE_SEARCH_RESULT_STRINGS} may be provided to
* apply different HTTP headers to each different web result in the list. These headers will
* only be used in the case that the url for a particular web result (from
* {@link #EXTRA_VOICE_SEARCH_RESULT_URLS}) is loaded.
*/
public static final String EXTRA_VOICE_SEARCH_RESULT_HTTP_HEADERS =
"android.speech.extras.EXTRA_VOICE_SEARCH_RESULT_HTTP_HEADERS";
/**
* The scheme used currently for html content in {@link #EXTRA_VOICE_SEARCH_RESULT_HTML}.
* Note that this should only be used in tandem with this particular extra; it should
* NOT be used for generic URIs such as those found in the data field of an Intent.
*/
public static final String URI_SCHEME_INLINE = "inline";
}
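
The fallback order described in the class comment (inline HTML, then URL, then a self-constructed search) might be handled roughly as below; the renderInlineHtml, loadUrl, and buildSearchUrl helpers are hypothetical stand-ins for a browser's own plumbing:

import android.content.Intent;
import android.speech.RecognizerResultsIntent;
import java.util.ArrayList;

/** Illustrative handler for the ACTION_VOICE_SEARCH_RESULTS parallel arrays. */
final class VoiceSearchResultHandler {
    void handle(Intent intent) {
        ArrayList<String> strings = intent.getStringArrayListExtra(
                RecognizerResultsIntent.EXTRA_VOICE_SEARCH_RESULT_STRINGS);
        ArrayList<String> urls = intent.getStringArrayListExtra(
                RecognizerResultsIntent.EXTRA_VOICE_SEARCH_RESULT_URLS);
        ArrayList<String> html = intent.getStringArrayListExtra(
                RecognizerResultsIntent.EXTRA_VOICE_SEARCH_RESULT_HTML);
        if (strings == null || strings.isEmpty()) return;

        int i = 0; // take the highest-confidence result for this sketch
        String inline = (html != null && i < html.size()) ? html.get(i) : null;
        if (inline != null && inline.startsWith(
                RecognizerResultsIntent.URI_SCHEME_INLINE + ":")) {
            renderInlineHtml(inline);                // hypothetical helper
        } else if (urls != null && i < urls.size() && urls.get(i) != null) {
            loadUrl(urls.get(i));                    // hypothetical helper
        } else {
            loadUrl(buildSearchUrl(strings.get(i))); // hypothetical helper
        }
    }

    private void renderInlineHtml(String inlineUri) { /* browser-specific */ }
    private void loadUrl(String url) { /* browser-specific */ }
    private String buildSearchUrl(String query) { /* browser-specific */ return query; }
}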


@@ -0,0 +1,480 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.ServiceConnection;
import android.content.pm.ResolveInfo;
import android.os.Bundle;
import android.os.Handler;
import android.os.IBinder;
import android.os.Looper;
import android.os.Message;
import android.os.RemoteException;
import android.provider.Settings;
import android.text.TextUtils;
import android.util.Log;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
/**
* This class provides access to the speech recognition service. Do not instantiate this
* class directly; instead, call
* {@link SpeechRecognizer#createSpeechRecognizer(Context)}. This class's methods must be
* invoked only from the main application thread. Please note that the application must have
* {@link android.Manifest.permission#RECORD_AUDIO} permission to use this class.
*/
public class SpeechRecognizer {
/** DEBUG value to enable verbose debug prints */
private final static boolean DBG = false;
/** Log messages identifier */
private static final String TAG = "SpeechRecognizer";
/**
* Used to retrieve an {@code ArrayList&lt;String&gt;} from the {@link Bundle} passed to the
* {@link RecognitionListener#onResults(Bundle)} and
* {@link RecognitionListener#onPartialResults(Bundle)} methods. These strings are the possible
* recognition results, where the first element is the most likely candidate.
*/
public static final String RESULTS_RECOGNITION = "results_recognition";
/** Network operation timed out. */
public static final int ERROR_NETWORK_TIMEOUT = 1;
/** Other network related errors. */
public static final int ERROR_NETWORK = 2;
/** Audio recording error. */
public static final int ERROR_AUDIO = 3;
/** Server sends error status. */
public static final int ERROR_SERVER = 4;
/** Other client side errors. */
public static final int ERROR_CLIENT = 5;
/** No speech input */
public static final int ERROR_SPEECH_TIMEOUT = 6;
/** No recognition result matched. */
public static final int ERROR_NO_MATCH = 7;
/** RecognitionService busy. */
public static final int ERROR_RECOGNIZER_BUSY = 8;
/** Insufficient permissions */
public static final int ERROR_INSUFFICIENT_PERMISSIONS = 9;
/** action codes */
private final static int MSG_START = 1;
private final static int MSG_STOP = 2;
private final static int MSG_CANCEL = 3;
private final static int MSG_CHANGE_LISTENER = 4;
/** The actual RecognitionService endpoint */
private IRecognitionService mService;
/** The connection to the actual service */
private Connection mConnection;
/** Context with which the manager was created */
private final Context mContext;
/** Component to direct service intent to */
private final ComponentName mServiceComponent;
/** Handler that will execute the main tasks */
private Handler mHandler = new Handler() {
@Override
public void handleMessage(Message msg) {
switch (msg.what) {
case MSG_START:
handleStartListening((Intent) msg.obj);
break;
case MSG_STOP:
handleStopMessage();
break;
case MSG_CANCEL:
handleCancelMessage();
break;
case MSG_CHANGE_LISTENER:
handleChangeListener((RecognitionListener) msg.obj);
break;
}
}
};
/**
* Temporary queue that saves messages until the connection is established; afterwards,
* only mHandler will receive the messages.
*/
private final Queue<Message> mPendingTasks = new LinkedList<Message>();
/** The Listener that will receive all the callbacks */
private final InternalListener mListener = new InternalListener();
/**
* The right way to create a {@code SpeechRecognizer} is to use the
* {@link #createSpeechRecognizer} static factory method.
*/
private SpeechRecognizer(final Context context, final ComponentName serviceComponent) {
mContext = context;
mServiceComponent = serviceComponent;
}
/**
* Basic ServiceConnection that records the mService variable. Once connected, it
* forwards any messages queued while the connection was pending to mHandler.
*/
private class Connection implements ServiceConnection {
public void onServiceConnected(final ComponentName name, final IBinder service) {
// always done on the application main thread, so no need to send message to mHandler
mService = IRecognitionService.Stub.asInterface(service);
if (DBG) Log.d(TAG, "onServiceConnected - Success");
while (!mPendingTasks.isEmpty()) {
mHandler.sendMessage(mPendingTasks.poll());
}
}
public void onServiceDisconnected(final ComponentName name) {
// always done on the application main thread, so no need to send message to mHandler
mService = null;
mConnection = null;
mPendingTasks.clear();
if (DBG) Log.d(TAG, "onServiceDisconnected - Success");
}
}
/**
* Checks whether a speech recognition service is available on the system. If this method
* returns {@code false}, {@link SpeechRecognizer#createSpeechRecognizer(Context)} will
* fail.
*
* @param context with which {@code SpeechRecognizer} will be created
* @return {@code true} if recognition is available, {@code false} otherwise
*/
public static boolean isRecognitionAvailable(final Context context) {
final List<ResolveInfo> list = context.getPackageManager().queryIntentServices(
new Intent(RecognitionService.SERVICE_INTERFACE), 0);
return list != null && list.size() != 0;
}
/**
* Factory method to create a new {@code SpeechRecognizer}. Please note that
* {@link #setRecognitionListener(RecognitionListener)} should be called before dispatching any
* command to the created {@code SpeechRecognizer}, otherwise no notifications will be
* received.
*
* @param context in which to create {@code SpeechRecognizer}
* @return a new {@code SpeechRecognizer}
*/
public static SpeechRecognizer createSpeechRecognizer(final Context context) {
return createSpeechRecognizer(context, null);
}
/**
* Factory method to create a new {@code SpeechRecognizer}. Please note that
* {@link #setRecognitionListener(RecognitionListener)} should be called before dispatching any
* command to the created {@code SpeechRecognizer}, otherwise no notifications will be
* received.
*
* Use this version of the method to specify a specific service to direct this
* {@link SpeechRecognizer} to. Normally you would not use this; use
* {@link #createSpeechRecognizer(Context)} instead to use the system default recognition
* service.
*
* @param context in which to create {@code SpeechRecognizer}
* @param serviceComponent the {@link ComponentName} of a specific service to direct this
* {@code SpeechRecognizer} to
* @return a new {@code SpeechRecognizer}
*/
public static SpeechRecognizer createSpeechRecognizer(final Context context,
final ComponentName serviceComponent) {
if (context == null) {
throw new IllegalArgumentException("Context cannot be null)");
}
checkIsCalledFromMainThread();
return new SpeechRecognizer(context, serviceComponent);
}
/**
* Sets the listener that will receive all the callbacks. The previous unfinished commands will
* be executed with the old listener, while any following command will be executed with the new
* listener.
*
* @param listener listener that will receive all the callbacks from the created
* {@link SpeechRecognizer}, this must not be null.
*/
public void setRecognitionListener(RecognitionListener listener) {
checkIsCalledFromMainThread();
putMessage(Message.obtain(mHandler, MSG_CHANGE_LISTENER, listener));
}
/**
* Starts listening for speech. Please note that
* {@link #setRecognitionListener(RecognitionListener)} should be called beforehand, otherwise
* no notifications will be received.
*
* @param recognizerIntent contains parameters for the recognition to be performed. The intent
* may also contain optional extras, see {@link RecognizerIntent}. If these values are
* not set explicitly, default values will be used by the recognizer.
*/
public void startListening(final Intent recognizerIntent) {
if (recognizerIntent == null) {
throw new IllegalArgumentException("intent must not be null");
}
checkIsCalledFromMainThread();
if (mConnection == null) { // first time connection
mConnection = new Connection();
Intent serviceIntent = new Intent(RecognitionService.SERVICE_INTERFACE);
if (mServiceComponent == null) {
String serviceComponent = Settings.Secure.getString(mContext.getContentResolver(),
Settings.Secure.VOICE_RECOGNITION_SERVICE);
if (TextUtils.isEmpty(serviceComponent)) {
Log.e(TAG, "no selected voice recognition service");
mListener.onError(ERROR_CLIENT);
return;
}
serviceIntent.setComponent(ComponentName.unflattenFromString(serviceComponent));
} else {
serviceIntent.setComponent(mServiceComponent);
}
if (!mContext.bindService(serviceIntent, mConnection, Context.BIND_AUTO_CREATE)) {
Log.e(TAG, "bind to recognition service failed");
mConnection = null;
mService = null;
mListener.onError(ERROR_CLIENT);
return;
}
}
putMessage(Message.obtain(mHandler, MSG_START, recognizerIntent));
}
/**
* Stops listening for speech. Speech captured so far will be recognized as if the user had
* stopped speaking at this point. Note that in the default case, this does not need to be
* called, as the speech endpointer will automatically stop the recognizer listening when it
* determines speech has completed. However, you can manipulate endpointer parameters directly
* using the intent extras defined in {@link RecognizerIntent}, in which case you may sometimes
* want to manually call this method to stop listening sooner. Please note that
* {@link #setRecognitionListener(RecognitionListener)} should be called beforehand, otherwise
* no notifications will be received.
*/
public void stopListening() {
checkIsCalledFromMainThread();
putMessage(Message.obtain(mHandler, MSG_STOP));
}
/**
* Cancels the speech recognition. Please note that
* {@link #setRecognitionListener(RecognitionListener)} should be called beforehand, otherwise
* no notifications will be received.
*/
public void cancel() {
checkIsCalledFromMainThread();
putMessage(Message.obtain(mHandler, MSG_CANCEL));
}
private static void checkIsCalledFromMainThread() {
if (Looper.myLooper() != Looper.getMainLooper()) {
throw new RuntimeException(
"SpeechRecognizer should be used only from the application's main thread");
}
}
private void putMessage(Message msg) {
if (mService == null) {
mPendingTasks.offer(msg);
} else {
mHandler.sendMessage(msg);
}
}
/** sends the actual message to the service */
private void handleStartListening(Intent recognizerIntent) {
if (!checkOpenConnection()) {
return;
}
try {
mService.startListening(recognizerIntent, mListener);
if (DBG) Log.d(TAG, "service start listening command succeded");
} catch (final RemoteException e) {
Log.e(TAG, "startListening() failed", e);
mListener.onError(ERROR_CLIENT);
}
}
/** sends the actual message to the service */
private void handleStopMessage() {
if (!checkOpenConnection()) {
return;
}
try {
mService.stopListening(mListener);
if (DBG) Log.d(TAG, "service stop listening command succeded");
} catch (final RemoteException e) {
Log.e(TAG, "stopListening() failed", e);
mListener.onError(ERROR_CLIENT);
}
}
/** sends the actual message to the service */
private void handleCancelMessage() {
if (!checkOpenConnection()) {
return;
}
try {
mService.cancel(mListener);
if (DBG) Log.d(TAG, "service cancel command succeded");
} catch (final RemoteException e) {
Log.e(TAG, "cancel() failed", e);
mListener.onError(ERROR_CLIENT);
}
}
private boolean checkOpenConnection() {
if (mService != null) {
return true;
}
mListener.onError(ERROR_CLIENT);
Log.e(TAG, "not connected to the recognition service");
return false;
}
/** changes the listener */
private void handleChangeListener(RecognitionListener listener) {
if (DBG) Log.d(TAG, "handleChangeListener, listener=" + listener);
mListener.mInternalListener = listener;
}
/**
* Destroys the {@code SpeechRecognizer} object.
*/
public void destroy() {
if (mConnection != null) {
mContext.unbindService(mConnection);
}
mPendingTasks.clear();
mService = null;
mConnection = null;
mListener.mInternalListener = null;
}
/**
* Internal wrapper of IRecognitionListener which will propagate the results to
* RecognitionListener
*/
private class InternalListener extends IRecognitionListener.Stub {
private RecognitionListener mInternalListener;
private final static int MSG_BEGINNING_OF_SPEECH = 1;
private final static int MSG_BUFFER_RECEIVED = 2;
private final static int MSG_END_OF_SPEECH = 3;
private final static int MSG_ERROR = 4;
private final static int MSG_READY_FOR_SPEECH = 5;
private final static int MSG_RESULTS = 6;
private final static int MSG_PARTIAL_RESULTS = 7;
private final static int MSG_RMS_CHANGED = 8;
private final static int MSG_ON_EVENT = 9;
private final Handler mInternalHandler = new Handler() {
@Override
public void handleMessage(Message msg) {
if (mInternalListener == null) {
return;
}
switch (msg.what) {
case MSG_BEGINNING_OF_SPEECH:
mInternalListener.onBeginningOfSpeech();
break;
case MSG_BUFFER_RECEIVED:
mInternalListener.onBufferReceived((byte[]) msg.obj);
break;
case MSG_END_OF_SPEECH:
mInternalListener.onEndOfSpeech();
break;
case MSG_ERROR:
mInternalListener.onError((Integer) msg.obj);
break;
case MSG_READY_FOR_SPEECH:
mInternalListener.onReadyForSpeech((Bundle) msg.obj);
break;
case MSG_RESULTS:
mInternalListener.onResults((Bundle) msg.obj);
break;
case MSG_PARTIAL_RESULTS:
mInternalListener.onPartialResults((Bundle) msg.obj);
break;
case MSG_RMS_CHANGED:
mInternalListener.onRmsChanged((Float) msg.obj);
break;
case MSG_ON_EVENT:
mInternalListener.onEvent(msg.arg1, (Bundle) msg.obj);
break;
}
}
};
public void onBeginningOfSpeech() {
Message.obtain(mInternalHandler, MSG_BEGINNING_OF_SPEECH).sendToTarget();
}
public void onBufferReceived(final byte[] buffer) {
Message.obtain(mInternalHandler, MSG_BUFFER_RECEIVED, buffer).sendToTarget();
}
public void onEndOfSpeech() {
Message.obtain(mInternalHandler, MSG_END_OF_SPEECH).sendToTarget();
}
public void onError(final int error) {
Message.obtain(mInternalHandler, MSG_ERROR, error).sendToTarget();
}
public void onReadyForSpeech(final Bundle noiseParams) {
Message.obtain(mInternalHandler, MSG_READY_FOR_SPEECH, noiseParams).sendToTarget();
}
public void onResults(final Bundle results) {
Message.obtain(mInternalHandler, MSG_RESULTS, results).sendToTarget();
}
public void onPartialResults(final Bundle results) {
Message.obtain(mInternalHandler, MSG_PARTIAL_RESULTS, results).sendToTarget();
}
public void onRmsChanged(final float rmsdB) {
Message.obtain(mInternalHandler, MSG_RMS_CHANGED, rmsdB).sendToTarget();
}
public void onEvent(final int eventType, final Bundle params) {
Message.obtain(mInternalHandler, MSG_ON_EVENT, eventType, eventType, params)
.sendToTarget();
}
}
}
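
Tying the pieces together, a minimal client-side sketch of the direct API; it must run on the main thread with RECORD_AUDIO held, and LoggingRecognitionListener refers to the illustrative listener sketched earlier:

import android.content.Context;
import android.content.Intent;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;

final class SpeechSession {
    private final SpeechRecognizer recognizer;

    SpeechSession(Context context) {
        // createSpeechRecognizer() is only useful if a service is installed.
        if (!SpeechRecognizer.isRecognitionAvailable(context)) {
            throw new IllegalStateException("no recognition service available");
        }
        recognizer = SpeechRecognizer.createSpeechRecognizer(context);
        // Must be set before any command, or no notifications are delivered.
        recognizer.setRecognitionListener(new LoggingRecognitionListener());
    }

    void start() {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        intent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true);
        recognizer.startListening(intent);
    }

    void stop() { recognizer.stopListening(); }    // the endpointer usually handles this
    void cancel() { recognizer.cancel(); }
    void release() { recognizer.destroy(); }
}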


@@ -0,0 +1,110 @@
/*---------------------------------------------------------------------------*
* MicrophoneInputStream.java *
* *
* Copyright 2007 Nuance Communications, Inc. *
* *
* Licensed under the Apache License, Version 2.0 (the 'License'); *
* you may not use this file except in compliance with the License. *
* *
* You may obtain a copy of the License at *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an 'AS IS' BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
*---------------------------------------------------------------------------*/
package android.speech.srec;
import java.io.IOException;
import java.io.InputStream;
import java.lang.IllegalStateException;
/**
* PCM input stream from the microphone, 16 bits per sample.
*/
public final class MicrophoneInputStream extends InputStream {
static {
System.loadLibrary("srec_jni");
}
private final static String TAG = "MicrophoneInputStream";
private int mAudioRecord = 0;
private byte[] mOneByte = new byte[1];
/**
* MicrophoneInputStream constructor.
* @param sampleRate sample rate of the microphone, typically 11025 or 8000.
* @param fifoDepth depth of the real time fifo, measured in sampleRate clock ticks.
* This determines how long an application may delay before losing data.
*/
public MicrophoneInputStream(int sampleRate, int fifoDepth) throws IOException {
mAudioRecord = AudioRecordNew(sampleRate, fifoDepth);
if (mAudioRecord == 0) throw new IOException("AudioRecord constructor failed - busy?");
int status = AudioRecordStart(mAudioRecord);
if (status != 0) {
close();
throw new IOException("AudioRecord start failed: " + status);
}
}
@Override
public int read() throws IOException {
if (mAudioRecord == 0) throw new IllegalStateException("not open");
int rtn = AudioRecordRead(mAudioRecord, mOneByte, 0, 1);
return rtn == 1 ? ((int)mOneByte[0] & 0xff) : -1;
}
@Override
public int read(byte[] b) throws IOException {
if (mAudioRecord == 0) throw new IllegalStateException("not open");
return AudioRecordRead(mAudioRecord, b, 0, b.length);
}
@Override
public int read(byte[] b, int offset, int length) throws IOException {
if (mAudioRecord == 0) throw new IllegalStateException("not open");
// TODO: should we force all reads to be a multiple of the sample size?
return AudioRecordRead(mAudioRecord, b, offset, length);
}
/**
* Closes this stream.
*/
@Override
public void close() throws IOException {
if (mAudioRecord != 0) {
try {
AudioRecordStop(mAudioRecord);
} finally {
try {
AudioRecordDelete(mAudioRecord);
} finally {
mAudioRecord = 0;
}
}
}
}
@Override
protected void finalize() throws Throwable {
if (mAudioRecord != 0) {
close();
throw new IOException("someone forgot to close MicrophoneInputStream");
}
}
//
// AudioRecord JNI interface
//
private static native int AudioRecordNew(int sampleRate, int fifoDepth);
private static native int AudioRecordStart(int audioRecord);
private static native int AudioRecordRead(int audioRecord, byte[] b, int offset, int length) throws IOException;
private static native void AudioRecordStop(int audioRecord) throws IOException;
private static native void AudioRecordDelete(int audioRecord) throws IOException;
}
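
A usage sketch for the stream above (rates mirror those used elsewhere in this package; buffer sizes are illustrative, one second of 16-bit mono PCM at 11025 Hz):

MicrophoneInputStream mic = new MicrophoneInputStream(11025, 11025 * 5);
try {
    byte[] pcm = new byte[11025 * 2];   // one second: 11025 samples, 2 bytes each
    int off = 0;
    while (off < pcm.length) {
        int n = mic.read(pcm, off, pcm.length - off);
        if (n == -1) break;             // stream ended
        off += n;
    }
} finally {
    mic.close();
}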

View File

@ -0,0 +1,719 @@
/*
* ---------------------------------------------------------------------------
* Recognizer.java
*
* Copyright 2007 Nuance Communications, Inc.
*
* Licensed under the Apache License, Version 2.0 (the 'License'); you may not
* use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* ---------------------------------------------------------------------------
*/
package android.speech.srec;
import android.util.Config;
import android.util.Log;
import java.io.File;
import java.io.InputStream;
import java.io.IOException;
import java.util.Locale;
/**
* Simple, synchronous speech recognizer, using the Nuance SREC package.
* Usage proceeds as follows:
*
* <ul>
* <li>Create a <code>Recognizer</code>.
* <li>Create a <code>Recognizer.Grammar</code>.
* <li>Setup the <code>Recognizer.Grammar</code>.
* <li>Reset the <code>Recognizer.Grammar</code> slots, if needed.
* <li>Fill the <code>Recognizer.Grammar</code> slots, if needed.
* <li>Compile the <code>Recognizer.Grammar</code>, if needed.
* <li>Save the filled <code>Recognizer.Grammar</code>, if needed.
* <li>Start the <code>Recognizer</code>.
* <li>Loop over <code>advance</code> and <code>putAudio</code> until recognition complete.
* <li>Fetch and process results, or notify of failure.
* <li>Stop the <code>Recognizer</code>.
* <li>Destroy the <code>Recognizer</code>.
* </ul>
*
* <p>Below is example code</p>
*
* <pre class="prettyprint">
*
* // create and start audio input
* InputStream audio = new MicrophoneInputStream(11025, 11025*5);
* // create a Recognizer
* String cdir = Recognizer.getConfigDir(null);
* Recognizer recognizer = new Recognizer(cdir + "/baseline11k.par");
* // create and load a Grammar
* Recognizer.Grammar grammar = recognizer.new Grammar(cdir + "/grammars/VoiceDialer.g2g");
* // setup the Grammar to work with the Recognizer
* grammar.setupRecognizer();
* // fill the Grammar slots with names and save, if required
* grammar.resetAllSlots();
* for (String name : names) grammar.addWordToSlot("@Names", name, null, 1, "V=1");
* grammar.compile();
* grammar.save(".../foo.g2g");
* // start the Recognizer
* recognizer.start();
* // loop over Recognizer events
* while (true) {
* switch (recognizer.advance()) {
* case Recognizer.EVENT_INCOMPLETE:
* case Recognizer.EVENT_STARTED:
* case Recognizer.EVENT_START_OF_VOICING:
* case Recognizer.EVENT_END_OF_VOICING:
* // let the Recognizer continue to run
* continue;
* case Recognizer.EVENT_RECOGNITION_RESULT:
* // success, so fetch results here!
* for (int i = 0; i < recognizer.getResultCount(); i++) {
* String result = recognizer.getResult(i, Recognizer.KEY_LITERAL);
* }
* break;
* case Recognizer.EVENT_NEED_MORE_AUDIO:
* // put more audio in the Recognizer
* recognizer.putAudio(audio);
* continue;
* default:
* notifyFailure();
* break;
* }
* break;
* }
* // stop the Recognizer
* recognizer.stop();
* // destroy the Recognizer
* recognizer.destroy();
* // stop the audio device
* audio.close();
*
* </pre>
*/
public final class Recognizer {
static {
System.loadLibrary("srec_jni");
}
private final static String TAG = "Recognizer";
/**
* Result key corresponding to confidence score.
*/
public static final String KEY_CONFIDENCE = "conf";
/**
* Result key corresponding to literal text.
*/
public static final String KEY_LITERAL = "literal";
/**
* Result key corresponding to semantic meaning text.
*/
public static final String KEY_MEANING = "meaning";
// handle to SR_Vocabulary object
private int mVocabulary = 0;
// handle to SR_Recognizer object
private int mRecognizer = 0;
// Grammar currently associated with Recognizer via SR_GrammarSetupRecognizer
private Grammar mActiveGrammar = null;
/**
* Get the pathname of the SREC configuration directory corresponding to the
* language indicated by the Locale.
* This directory contains dictionaries, speech models,
* configuration files, and other data needed by the Recognizer.
* @param locale <code>Locale</code> corresponding to the desired language,
* or null for default, currently <code>Locale.US</code>.
* @return Pathname of the configuration directory.
*/
public static String getConfigDir(Locale locale) {
if (locale == null) locale = Locale.US;
String dir = "/system/usr/srec/config/" +
locale.toString().replace('_', '.').toLowerCase();
if ((new File(dir)).isDirectory()) return dir;
return null;
}
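// e.g. Locale.US maps to "/system/usr/srec/config/en.us", which must exist on the device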
/**
* Create an instance of a SREC speech recognizer.
*
* @param configFile pathname of the baseline*.par configuration file,
* which in turn contains references to dictionaries, speech models,
* and other data needed to configure and operate the recognizer.
* A separate config file is needed for each audio sample rate.
* Two files, baseline11k.par and baseline8k.par, which correspond to
* 11025 and 8000 hz, are present in the directory indicated by
* {@link #getConfigDir}.
* @throws IOException
*/
public Recognizer(String configFile) throws IOException {
PMemInit();
SR_SessionCreate(configFile);
mRecognizer = SR_RecognizerCreate();
SR_RecognizerSetup(mRecognizer);
mVocabulary = SR_VocabularyLoad();
}
/**
* Represents a grammar loaded into the Recognizer.
*/
public class Grammar {
private int mGrammar = 0;
/**
* Create a <code>Grammar</code> instance.
* @param g2gFileName pathname of g2g file.
*/
public Grammar(String g2gFileName) throws IOException {
mGrammar = SR_GrammarLoad(g2gFileName);
SR_GrammarSetupVocabulary(mGrammar, mVocabulary);
}
/**
* Reset all slots.
*/
public void resetAllSlots() {
SR_GrammarResetAllSlots(mGrammar);
}
/**
* Add a word to a slot.
*
* @param slot slot name.
* @param word word to insert.
* @param pron pronunciation, or null to derive from word.
* @param weight weight to give the word. One is normal, 50 is low.
* @param tag semantic meaning tag string.
*/
public void addWordToSlot(String slot, String word, String pron, int weight, String tag) {
SR_GrammarAddWordToSlot(mGrammar, slot, word, pron, weight, tag);
}
/**
* Compile all slots.
*/
public void compile() {
SR_GrammarCompile(mGrammar);
}
/**
* Setup <code>Grammar</code> with <code>Recognizer</code>.
*/
public void setupRecognizer() {
SR_GrammarSetupRecognizer(mGrammar, mRecognizer);
mActiveGrammar = this;
}
/**
* Save <code>Grammar</code> to g2g file.
*
* @param g2gFileName
* @throws IOException
*/
public void save(String g2gFileName) throws IOException {
SR_GrammarSave(mGrammar, g2gFileName);
}
/**
* Release resources associated with this <code>Grammar</code>.
*/
public void destroy() {
// TODO: need to do cleanup and disassociation with Recognizer
if (mGrammar != 0) {
SR_GrammarDestroy(mGrammar);
mGrammar = 0;
}
}
/**
* Clean up resources.
*/
protected void finalize() {
if (mGrammar != 0) {
destroy();
throw new IllegalStateException("someone forgot to destroy Grammar");
}
}
}
/**
* Start recognition
*/
public void start() {
// TODO: shouldn't be here?
SR_RecognizerActivateRule(mRecognizer, mActiveGrammar.mGrammar, "trash", 1);
SR_RecognizerStart(mRecognizer);
}
/**
* Process some audio and return the current status.
* @return recognition event, one of:
* <ul>
* <li><code>EVENT_INVALID</code>
* <li><code>EVENT_NO_MATCH</code>
* <li><code>EVENT_INCOMPLETE</code>
* <li><code>EVENT_STARTED</code>
* <li><code>EVENT_STOPPED</code>
* <li><code>EVENT_START_OF_VOICING</code>
* <li><code>EVENT_END_OF_VOICING</code>
* <li><code>EVENT_SPOKE_TOO_SOON</code>
* <li><code>EVENT_RECOGNITION_RESULT</code>
* <li><code>EVENT_START_OF_UTTERANCE_TIMEOUT</code>
* <li><code>EVENT_RECOGNITION_TIMEOUT</code>
* <li><code>EVENT_NEED_MORE_AUDIO</code>
* <li><code>EVENT_MAX_SPEECH</code>
* </ul>
*/
public int advance() {
return SR_RecognizerAdvance(mRecognizer);
}
/**
* Put audio samples into the <code>Recognizer</code>.
* @param buf holds the audio samples.
* @param offset offset of the first sample.
* @param length number of bytes containing samples.
* @param isLast indicates no more audio data, normally false.
* @return number of bytes accepted.
*/
public int putAudio(byte[] buf, int offset, int length, boolean isLast) {
return SR_RecognizerPutAudio(mRecognizer, buf, offset, length, isLast);
}
/**
* Read audio samples from an <code>InputStream</code> and put them in the
* <code>Recognizer</code>.
* @param audio <code>InputStream</code> containing PCM audio samples.
*/
public void putAudio(InputStream audio) throws IOException {
// make sure the audio buffer is allocated
if (mPutAudioBuffer == null) mPutAudioBuffer = new byte[512];
// read some data
int nbytes = audio.read(mPutAudioBuffer);
// eof, so signal Recognizer
if (nbytes == -1) {
SR_RecognizerPutAudio(mRecognizer, mPutAudioBuffer, 0, 0, true);
}
// put it into the Recognizer
else if (nbytes != SR_RecognizerPutAudio(mRecognizer, mPutAudioBuffer, 0, nbytes, false)) {
throw new IOException("SR_RecognizerPutAudio failed nbytes=" + nbytes);
}
}
// audio buffer for putAudio(InputStream)
private byte[] mPutAudioBuffer = null;
/**
* Get the number of recognition results. Must be called after
* <code>EVENT_RECOGNITION_RESULT</code> is returned by
* <code>advance</code>, but before <code>stop</code>.
*
* @return number of results in nbest list.
*/
public int getResultCount() {
return SR_RecognizerResultGetSize(mRecognizer);
}
/**
* Get a set of keys for the result. Must be called after
* <code>EVENT_RECOGNITION_RESULT</code> is returned by
* <code>advance</code>, but before <code>stop</code>.
*
* @param index index of result.
* @return array of keys.
*/
public String[] getResultKeys(int index) {
return SR_RecognizerResultGetKeyList(mRecognizer, index);
}
/**
* Get a result value. Must be called after
* <code>EVENT_RECOGNITION_RESULT</code> is returned by
* <code>advance</code>, but before <code>stop</code>.
*
* @param index index of the result.
* @param key key of the result. This is typically one of
* <code>KEY_CONFIDENCE</code>, <code>KEY_LITERAL</code>, or
* <code>KEY_MEANING</code>, but the user can also define their own keys
* in a grxml file, or in the <code>tag</code> slot of
* <code>Grammar.addWordToSlot</code>.
* @return the result.
*/
public String getResult(int index, String key) {
return SR_RecognizerResultGetValue(mRecognizer, index, key);
}
/**
* Stop the <code>Recognizer</code>.
*/
public void stop() {
SR_RecognizerStop(mRecognizer);
SR_RecognizerDeactivateRule(mRecognizer, mActiveGrammar.mGrammar, "trash");
}
/**
* Reset the acoustic state vector to its default value.
*
* @hide
*/
public void resetAcousticState() {
SR_AcousticStateReset(mRecognizer);
}
/**
* Set the acoustic state vector.
* @param state String containing the acoustic state vector.
*
* @hide
*/
public void setAcousticState(String state) {
SR_AcousticStateSet(mRecognizer, state);
}
/**
* Get the acoustic state vector.
* @return String containing the acoustic state vector.
*
* @hide
*/
public String getAcousticState() {
return SR_AcousticStateGet(mRecognizer);
}
/**
* Clean up resources.
*/
public void destroy() {
try {
if (mVocabulary != 0) SR_VocabularyDestroy(mVocabulary);
} finally {
mVocabulary = 0;
try {
if (mRecognizer != 0) SR_RecognizerUnsetup(mRecognizer);
} finally {
try {
if (mRecognizer != 0) SR_RecognizerDestroy(mRecognizer);
} finally {
mRecognizer = 0;
try {
SR_SessionDestroy();
} finally {
PMemShutdown();
}
}
}
}
}
/**
* Clean up resources.
*/
protected void finalize() throws Throwable {
if (mVocabulary != 0 || mRecognizer != 0) {
destroy();
throw new IllegalStateException("someone forgot to destroy Recognizer");
}
}
/* an example session captured, for reference
void doall() {
if (PMemInit ( )
|| lhs_audioinOpen ( WAVE_MAPPER, SREC_TEST_DEFAULT_AUDIO_FREQUENCY, &audio_in_handle )
|| srec_test_init_application_data ( &applicationData, argc, argv )
|| SR_SessionCreate ( "/system/usr/srec/config/en.us/baseline11k.par" )
|| SR_RecognizerCreate ( &applicationData.recognizer )
|| SR_RecognizerSetup ( applicationData.recognizer)
|| ESR_SessionGetLCHAR ( L("cmdline.vocabulary"), filename, &flen )
|| SR_VocabularyLoad ( filename, &applicationData.vocabulary )
|| SR_VocabularyGetLanguage ( applicationData.vocabulary, &applicationData.locale )
|| (applicationData.nametag = NULL)
|| SR_NametagsCreate ( &applicationData.nametags )
|| (LSTRCPY ( applicationData.grammars [0].grammar_path, "/system/usr/srec/config/en.us/grammars/VoiceDialer.g2g" ), 0)
|| (LSTRCPY ( applicationData.grammars [0].grammarID, "BothTags" ), 0)
|| (LSTRCPY ( applicationData.grammars [0].ruleName, "trash" ), 0)
|| (applicationData.grammars [0].is_ve_grammar = ESR_FALSE, 0)
|| SR_GrammarLoad (applicationData.grammars [0].grammar_path, &applicationData.grammars [applicationData.grammarCount].grammar )
|| SR_GrammarSetupVocabulary ( applicationData.grammars [0].grammar, applicationData.vocabulary )
|| SR_GrammarSetupRecognizer( applicationData.grammars [0].grammar, applicationData.recognizer )
|| SR_GrammarSetDispatchFunction ( applicationData.grammars [0].grammar, L("myDSMCallback"), NULL, myDSMCallback )
|| (applicationData.grammarCount++, 0)
|| SR_RecognizerActivateRule ( applicationData.recognizer, applicationData.grammars [0].grammar,
applicationData.grammars [0].ruleName, 1 )
|| (applicationData.active_grammar_num = 0, 0)
|| lhs_audioinStart ( audio_in_handle )
|| SR_RecognizerStart ( applicationData.recognizer )
|| strl ( applicationData.grammars [0].grammar, &applicationData, audio_in_handle, &recognition_count )
|| SR_RecognizerStop ( applicationData.recognizer )
|| lhs_audioinStop ( audio_in_handle )
|| SR_RecognizerDeactivateRule ( applicationData.recognizer, applicationData.grammars [0].grammar, applicationData.grammars [0].ruleName )
|| (applicationData.active_grammar_num = -1, 0)
|| SR_GrammarDestroy ( applicationData.grammars [0].grammar )
|| (applicationData.grammarCount--, 0)
|| SR_NametagsDestroy ( applicationData.nametags )
|| (applicationData.nametags = NULL, 0)
|| SR_VocabularyDestroy ( applicationData.vocabulary )
|| (applicationData.vocabulary = NULL)
|| SR_RecognizerUnsetup ( applicationData.recognizer) // releases acoustic models
|| SR_RecognizerDestroy ( applicationData.recognizer )
|| (applicationData.recognizer = NULL)
|| SR_SessionDestroy ( )
|| srec_test_shutdown_application_data ( &applicationData )
|| lhs_audioinClose ( &audio_in_handle )
|| PMemShutdown ( )
}
*/
//
// PMem native methods
//
private static native void PMemInit();
private static native void PMemShutdown();
//
// SR_Session native methods
//
private static native void SR_SessionCreate(String filename);
private static native void SR_SessionDestroy();
//
// SR_Recognizer native methods
//
/**
* Reserved value.
*/
public final static int EVENT_INVALID = 0;
/**
* <code>Recognizer</code> could not find a match for the utterance.
*/
public final static int EVENT_NO_MATCH = 1;
/**
* <code>Recognizer</code> processed one frame of audio.
*/
public final static int EVENT_INCOMPLETE = 2;
/**
* <code>Recognizer</code> has just been started.
*/
public final static int EVENT_STARTED = 3;
/**
* <code>Recognizer</code> is stopped.
*/
public final static int EVENT_STOPPED = 4;
/**
* Beginning of speech detected.
*/
public final static int EVENT_START_OF_VOICING = 5;
/**
* End of speech detected.
*/
public final static int EVENT_END_OF_VOICING = 6;
/**
* Beginning of utterance occurred too soon.
*/
public final static int EVENT_SPOKE_TOO_SOON = 7;
/**
* Recognition match detected.
*/
public final static int EVENT_RECOGNITION_RESULT = 8;
/**
* Timeout occurred before beginning of utterance.
*/
public final static int EVENT_START_OF_UTTERANCE_TIMEOUT = 9;
/**
* Timeout occurred before speech recognition could complete.
*/
public final static int EVENT_RECOGNITION_TIMEOUT = 10;
/**
* Not enough samples to process one frame.
*/
public final static int EVENT_NEED_MORE_AUDIO = 11;
/**
* More audio encountered than is allowed by 'swirec_max_speech_duration'.
*/
public final static int EVENT_MAX_SPEECH = 12;
/**
* Produce a displayable string from an <code>advance</code> event.
* @param event
* @return String representing the event.
*/
public static String eventToString(int event) {
switch (event) {
case EVENT_INVALID:
return "EVENT_INVALID";
case EVENT_NO_MATCH:
return "EVENT_NO_MATCH";
case EVENT_INCOMPLETE:
return "EVENT_INCOMPLETE";
case EVENT_STARTED:
return "EVENT_STARTED";
case EVENT_STOPPED:
return "EVENT_STOPPED";
case EVENT_START_OF_VOICING:
return "EVENT_START_OF_VOICING";
case EVENT_END_OF_VOICING:
return "EVENT_END_OF_VOICING";
case EVENT_SPOKE_TOO_SOON:
return "EVENT_SPOKE_TOO_SOON";
case EVENT_RECOGNITION_RESULT:
return "EVENT_RECOGNITION_RESULT";
case EVENT_START_OF_UTTERANCE_TIMEOUT:
return "EVENT_START_OF_UTTERANCE_TIMEOUT";
case EVENT_RECOGNITION_TIMEOUT:
return "EVENT_RECOGNITION_TIMEOUT";
case EVENT_NEED_MORE_AUDIO:
return "EVENT_NEED_MORE_AUDIO";
case EVENT_MAX_SPEECH:
return "EVENT_MAX_SPEECH";
}
return "EVENT_" + event;
}
//
// SR_Recognizer methods
//
private static native void SR_RecognizerStart(int recognizer);
private static native void SR_RecognizerStop(int recognizer);
private static native int SR_RecognizerCreate();
private static native void SR_RecognizerDestroy(int recognizer);
private static native void SR_RecognizerSetup(int recognizer);
private static native void SR_RecognizerUnsetup(int recognizer);
private static native boolean SR_RecognizerIsSetup(int recognizer);
private static native String SR_RecognizerGetParameter(int recognizer, String key);
private static native int SR_RecognizerGetSize_tParameter(int recognizer, String key);
private static native boolean SR_RecognizerGetBoolParameter(int recognizer, String key);
private static native void SR_RecognizerSetParameter(int recognizer, String key, String value);
private static native void SR_RecognizerSetSize_tParameter(int recognizer,
String key, int value);
private static native void SR_RecognizerSetBoolParameter(int recognizer, String key,
boolean value);
private static native void SR_RecognizerSetupRule(int recognizer, int grammar,
String ruleName);
private static native boolean SR_RecognizerHasSetupRules(int recognizer);
private static native void SR_RecognizerActivateRule(int recognizer, int grammar,
String ruleName, int weight);
private static native void SR_RecognizerDeactivateRule(int recognizer, int grammar,
String ruleName);
private static native void SR_RecognizerDeactivateAllRules(int recognizer);
private static native boolean SR_RecognizerIsActiveRule(int recognizer, int grammar,
String ruleName);
private static native boolean SR_RecognizerCheckGrammarConsistency(int recognizer,
int grammar);
private static native int SR_RecognizerPutAudio(int recognizer, byte[] buffer, int offset,
int length, boolean isLast);
private static native int SR_RecognizerAdvance(int recognizer);
// private static native void SR_RecognizerLoadUtterance(int recognizer,
// const LCHAR* filename);
// private static native void SR_RecognizerLoadWaveFile(int recognizer,
// const LCHAR* filename);
// private static native void SR_RecognizerSetLockFunction(int recognizer,
// SR_RecognizerLockFunction function, void* data);
private static native boolean SR_RecognizerIsSignalClipping(int recognizer);
private static native boolean SR_RecognizerIsSignalDCOffset(int recognizer);
private static native boolean SR_RecognizerIsSignalNoisy(int recognizer);
private static native boolean SR_RecognizerIsSignalTooQuiet(int recognizer);
private static native boolean SR_RecognizerIsSignalTooFewSamples(int recognizer);
private static native boolean SR_RecognizerIsSignalTooManySamples(int recognizer);
// private static native void SR_Recognizer_Change_Sample_Rate (size_t new_sample_rate);
//
// SR_AcousticState native methods
//
private static native void SR_AcousticStateReset(int recognizer);
private static native void SR_AcousticStateSet(int recognizer, String state);
private static native String SR_AcousticStateGet(int recognizer);
//
// SR_Grammar native methods
//
private static native void SR_GrammarCompile(int grammar);
private static native void SR_GrammarAddWordToSlot(int grammar, String slot,
String word, String pronunciation, int weight, String tag);
private static native void SR_GrammarResetAllSlots(int grammar);
// private static native void SR_GrammarAddNametagToSlot(int grammar, String slot,
// const struct SR_Nametag_t* nametag, int weight, String tag);
private static native void SR_GrammarSetupVocabulary(int grammar, int vocabulary);
// private static native void SR_GrammarSetupModels(int grammar, SR_AcousticModels* models);
private static native void SR_GrammarSetupRecognizer(int grammar, int recognizer);
private static native void SR_GrammarUnsetupRecognizer(int grammar);
// private static native void SR_GrammarGetModels(int grammar,SR_AcousticModels** models);
private static native int SR_GrammarCreate();
private static native void SR_GrammarDestroy(int grammar);
private static native int SR_GrammarLoad(String filename);
private static native void SR_GrammarSave(int grammar, String filename);
// private static native void SR_GrammarSetDispatchFunction(int grammar,
// const LCHAR* name, void* userData, SR_GrammarDispatchFunction function);
// private static native void SR_GrammarSetParameter(int grammar, const
// LCHAR* key, void* value);
// private static native void SR_GrammarSetSize_tParameter(int grammar,
// const LCHAR* key, size_t value);
// private static native void SR_GrammarGetParameter(int grammar, const
// LCHAR* key, void** value);
// private static native void SR_GrammarGetSize_tParameter(int grammar,
// const LCHAR* key, size_t* value);
// private static native void SR_GrammarCheckParse(int grammar, const LCHAR*
// transcription, SR_SemanticResult** result, size_t* resultCount);
private static native void SR_GrammarAllowOnly(int grammar, String transcription);
private static native void SR_GrammarAllowAll(int grammar);
//
// SR_Vocabulary native methods
//
// private static native int SR_VocabularyCreate();
private static native int SR_VocabularyLoad();
// private static native void SR_VocabularySave(SR_Vocabulary* self,
// const LCHAR* filename);
// private static native void SR_VocabularyAddWord(SR_Vocabulary* self,
// const LCHAR* word);
// private static native void SR_VocabularyGetLanguage(SR_Vocabulary* self,
// ESR_Locale* locale);
private static native void SR_VocabularyDestroy(int vocabulary);
private static native String SR_VocabularyGetPronunciation(int vocabulary, String word);
//
// SR_RecognizerResult native methods
//
private static native byte[] SR_RecognizerResultGetWaveform(int recognizer);
private static native int SR_RecognizerResultGetSize(int recognizer);
private static native int SR_RecognizerResultGetKeyCount(int recognizer, int nbest);
private static native String[] SR_RecognizerResultGetKeyList(int recognizer, int nbest);
private static native String SR_RecognizerResultGetValue(int recognizer,
int nbest, String key);
// private static native void SR_RecognizerResultGetLocale(int recognizer, ESR_Locale* locale);
}
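
Once advance() returns EVENT_RECOGNITION_RESULT, the three result keys defined above are typically read together; a short sketch continuing the class-comment example (the "srec" log tag is illustrative):

for (int i = 0; i < recognizer.getResultCount(); i++) {
    String conf = recognizer.getResult(i, Recognizer.KEY_CONFIDENCE);
    String literal = recognizer.getResult(i, Recognizer.KEY_LITERAL);
    String meaning = recognizer.getResult(i, Recognizer.KEY_MEANING);
    Log.d("srec", i + ": " + conf + " '" + literal + "' -> " + meaning);
}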

View File

@ -0,0 +1,187 @@
/*
* ---------------------------------------------------------------------------
* UlawEncoderInputStream.java
*
* Copyright 2008 Nuance Communications, Inc.
*
* Licensed under the Apache License, Version 2.0 (the 'License'); you may not
* use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* ---------------------------------------------------------------------------
*/
package android.speech.srec;
import java.io.IOException;
import java.io.InputStream;
/**
* InputStream which transforms 16 bit pcm data to ulaw data.
*
* Not yet ready to be supported, so
* @hide
*/
public final class UlawEncoderInputStream extends InputStream {
private final static String TAG = "UlawEncoderInputStream";
private final static int MAX_ULAW = 8192;
private final static int SCALE_BITS = 16;
private InputStream mIn;
private int mMax = 0;
private final byte[] mBuf = new byte[1024];
private int mBufCount = 0; // should be 0 or 1
private final byte[] mOneByte = new byte[1];
public static void encode(byte[] pcmBuf, int pcmOffset,
byte[] ulawBuf, int ulawOffset, int length, int max) {
// from 'ulaw' in wikipedia
// +8191 to +8159 0x80
// +8158 to +4063 in 16 intervals of 256 0x80 + interval number
// +4062 to +2015 in 16 intervals of 128 0x90 + interval number
// +2014 to +991 in 16 intervals of 64 0xA0 + interval number
// +990 to +479 in 16 intervals of 32 0xB0 + interval number
// +478 to +223 in 16 intervals of 16 0xC0 + interval number
// +222 to +95 in 16 intervals of 8 0xD0 + interval number
// +94 to +31 in 16 intervals of 4 0xE0 + interval number
// +30 to +1 in 15 intervals of 2 0xF0 + interval number
// 0 0xFF
// -1 0x7F
// -31 to -2 in 15 intervals of 2 0x70 + interval number
// -95 to -32 in 16 intervals of 4 0x60 + interval number
// -223 to -96 in 16 intervals of 8 0x50 + interval number
// -479 to -224 in 16 intervals of 16 0x40 + interval number
// -991 to -480 in 16 intervals of 32 0x30 + interval number
// -2015 to -992 in 16 intervals of 64 0x20 + interval number
// -4063 to -2016 in 16 intervals of 128 0x10 + interval number
// -8159 to -4064 in 16 intervals of 256 0x00 + interval number
// -8192 to -8160 0x00
// set scale factors
if (max <= 0) max = MAX_ULAW;
int coef = MAX_ULAW * (1 << SCALE_BITS) / max;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[pcmOffset++]) + (pcmBuf[pcmOffset++] << 8);
pcm = (pcm * coef) >> SCALE_BITS;
int ulaw;
if (pcm >= 0) {
ulaw = pcm <= 0 ? 0xff :
pcm <= 30 ? 0xf0 + (( 30 - pcm) >> 1) :
pcm <= 94 ? 0xe0 + (( 94 - pcm) >> 2) :
pcm <= 222 ? 0xd0 + (( 222 - pcm) >> 3) :
pcm <= 478 ? 0xc0 + (( 478 - pcm) >> 4) :
pcm <= 990 ? 0xb0 + (( 990 - pcm) >> 5) :
pcm <= 2014 ? 0xa0 + ((2014 - pcm) >> 6) :
pcm <= 4062 ? 0x90 + ((4062 - pcm) >> 7) :
pcm <= 8158 ? 0x80 + ((8158 - pcm) >> 8) :
0x80;
} else {
ulaw = -1 <= pcm ? 0x7f :
-31 <= pcm ? 0x70 + ((pcm - -31) >> 1) :
-95 <= pcm ? 0x60 + ((pcm - -95) >> 2) :
-223 <= pcm ? 0x50 + ((pcm - -223) >> 3) :
-479 <= pcm ? 0x40 + ((pcm - -479) >> 4) :
-991 <= pcm ? 0x30 + ((pcm - -991) >> 5) :
-2015 <= pcm ? 0x20 + ((pcm - -2015) >> 6) :
-4063 <= pcm ? 0x10 + ((pcm - -4063) >> 7) :
-8159 <= pcm ? 0x00 + ((pcm - -8159) >> 8) :
0x00;
}
ulawBuf[ulawOffset++] = (byte)ulaw;
}
}
/**
* Compute the maximum of the absolute value of the pcm samples.
* The return value can be used to set ulaw encoder scaling.
* @param pcmBuf array containing 16 bit pcm data.
* @param offset offset of start of 16 bit pcm data.
* @param length number of pcm samples (not number of input bytes)
* @return maximum abs of pcm data values
*/
public static int maxAbsPcm(byte[] pcmBuf, int offset, int length) {
int max = 0;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[offset++]) + (pcmBuf[offset++] << 8);
if (pcm < 0) pcm = -pcm;
if (pcm > max) max = pcm;
}
return max;
}
/**
* Create an InputStream which takes 16 bit pcm data and produces ulaw data.
* @param in InputStream containing 16 bit pcm data.
* @param max pcm value corresponding to maximum ulaw value.
*/
public UlawEncoderInputStream(InputStream in, int max) {
mIn = in;
mMax = max;
}
@Override
public int read(byte[] buf, int offset, int length) throws IOException {
if (mIn == null) throw new IllegalStateException("not open");
// return at least one byte, but try to fill 'length'
while (mBufCount < 2) {
int n = mIn.read(mBuf, mBufCount, Math.min(length * 2, mBuf.length - mBufCount));
if (n == -1) return -1;
mBufCount += n;
}
// compand data
int n = Math.min(mBufCount / 2, length);
encode(mBuf, 0, buf, offset, n, mMax);
// move data to bottom of mBuf
mBufCount -= n * 2;
for (int i = 0; i < mBufCount; i++) mBuf[i] = mBuf[i + n * 2];
return n;
}
@Override
public int read(byte[] buf) throws IOException {
return read(buf, 0, buf.length);
}
@Override
public int read() throws IOException {
int n = read(mOneByte, 0, 1);
if (n == -1) return -1;
return 0xff & (int)mOneByte[0];
}
@Override
public void close() throws IOException {
if (mIn != null) {
InputStream in = mIn;
mIn = null;
in.close();
}
}
@Override
public int available() throws IOException {
return (mIn.available() + mBufCount) / 2;
}
}
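
A companding sketch for the encoder above; loadPcmSamples() is a hypothetical source of 16 bit pcm data, and scaling uses the peak measured by maxAbsPcm, as its doc comment suggests:

byte[] pcm = loadPcmSamples();   // hypothetical helper returning 16 bit pcm data
int peak = UlawEncoderInputStream.maxAbsPcm(pcm, 0, pcm.length / 2);
InputStream ulaw = new UlawEncoderInputStream(
        new java.io.ByteArrayInputStream(pcm), peak);
byte[] out = new byte[pcm.length / 2];   // one ulaw byte per 16 bit sample
int n = ulaw.read(out, 0, out.length);   // a single read may return fewer bytes
ulaw.close();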

View File

@ -0,0 +1,276 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech.srec;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* This class represents the header of a WAVE format audio file, which usually
* has a .wav suffix. The following integer-valued fields are contained:
* <ul>
* <li> format - usually PCM, ALAW or ULAW.
* <li> numChannels - 1 for mono, 2 for stereo.
* <li> sampleRate - usually 8000, 11025, 16000, 22050, or 44100 hz.
* <li> bitsPerSample - usually 16 for PCM, 8 for ALAW, or 8 for ULAW.
* <li> numBytes - size of audio data after this header, in bytes.
* </ul>
*
* Not yet ready to be supported, so
* @hide
*/
public class WaveHeader {
// follows WAVE format in http://ccrma.stanford.edu/courses/422/projects/WaveFormat
private static final String TAG = "WaveHeader";
private static final int HEADER_LENGTH = 44;
/** Indicates PCM format. */
public static final short FORMAT_PCM = 1;
/** Indicates ALAW format. */
public static final short FORMAT_ALAW = 6;
/** Indicates ULAW format. */
public static final short FORMAT_ULAW = 7;
private short mFormat;
private short mNumChannels;
private int mSampleRate;
private short mBitsPerSample;
private int mNumBytes;
/**
* Construct a WaveHeader, with all fields defaulting to zero.
*/
public WaveHeader() {
}
/**
* Construct a WaveHeader, with fields initialized.
* @param format format of audio data,
* one of {@link #FORMAT_PCM}, {@link #FORMAT_ULAW}, or {@link #FORMAT_ALAW}.
* @param numChannels 1 for mono, 2 for stereo.
* @param sampleRate typically 8000, 11025, 16000, 22050, or 44100 hz.
* @param bitsPerSample usually 16 for PCM, 8 for ULAW or 8 for ALAW.
* @param numBytes size of audio data after this header, in bytes.
*/
public WaveHeader(short format, short numChannels, int sampleRate, short bitsPerSample, int numBytes) {
mFormat = format;
mSampleRate = sampleRate;
mNumChannels = numChannels;
mBitsPerSample = bitsPerSample;
mNumBytes = numBytes;
}
/**
* Get the format field.
* @return format field,
* one of {@link #FORMAT_PCM}, {@link #FORMAT_ULAW}, or {@link #FORMAT_ALAW}.
*/
public short getFormat() {
return mFormat;
}
/**
* Set the format field.
* @param format
* one of {@link #FORMAT_PCM}, {@link #FORMAT_ULAW}, or {@link #FORMAT_ALAW}.
* @return reference to this WaveHeader instance.
*/
public WaveHeader setFormat(short format) {
mFormat = format;
return this;
}
/**
* Get the number of channels.
* @return number of channels, 1 for mono, 2 for stereo.
*/
public short getNumChannels() {
return mNumChannels;
}
/**
* Set the number of channels.
* @param numChannels 1 for mono, 2 for stereo.
* @return reference to this WaveHeader instance.
*/
public WaveHeader setNumChannels(short numChannels) {
mNumChannels = numChannels;
return this;
}
/**
* Get the sample rate.
* @return sample rate, typically 8000, 11025, 16000, 22050, or 44100 hz.
*/
public int getSampleRate() {
return mSampleRate;
}
/**
* Set the sample rate.
* @param sampleRate sample rate, typically 8000, 11025, 16000, 22050, or 44100 hz.
* @return reference to this WaveHeader instance.
*/
public WaveHeader setSampleRate(int sampleRate) {
mSampleRate = sampleRate;
return this;
}
/**
* Get the number of bits per sample.
* @return number of bits per sample,
* usually 16 for PCM, 8 for ULAW or 8 for ALAW.
*/
public short getBitsPerSample() {
return mBitsPerSample;
}
/**
* Set the number of bits per sample.
* @param bitsPerSample number of bits per sample,
* usually 16 for PCM, 8 for ULAW or 8 for ALAW.
* @return reference to this WaveHeader instance.
*/
public WaveHeader setBitsPerSample(short bitsPerSample) {
mBitsPerSample = bitsPerSample;
return this;
}
/**
* Get the size of audio data after this header, in bytes.
* @return size of audio data after this header, in bytes.
*/
public int getNumBytes() {
return mNumBytes;
}
/**
* Set the size of audio data after this header, in bytes.
* @param numBytes size of audio data after this header, in bytes.
* @return reference to this WaveHeader instance.
*/
public WaveHeader setNumBytes(int numBytes) {
mNumBytes = numBytes;
return this;
}
/**
* Read and initialize a WaveHeader.
* @param in {@link java.io.InputStream} to read from.
* @return number of bytes consumed.
* @throws IOException
*/
public int read(InputStream in) throws IOException {
/* RIFF header */
readId(in, "RIFF");
int numBytes = readInt(in) - 36;
readId(in, "WAVE");
/* fmt chunk */
readId(in, "fmt ");
if (16 != readInt(in)) throw new IOException("fmt chunk length not 16");
mFormat = readShort(in);
mNumChannels = readShort(in);
mSampleRate = readInt(in);
int byteRate = readInt(in);
short blockAlign = readShort(in);
mBitsPerSample = readShort(in);
if (byteRate != mNumChannels * mSampleRate * mBitsPerSample / 8) {
throw new IOException("fmt.ByteRate field inconsistent");
}
if (blockAlign != mNumChannels * mBitsPerSample / 8) {
throw new IOException("fmt.BlockAlign field inconsistent");
}
/* data chunk */
readId(in, "data");
mNumBytes = readInt(in);
return HEADER_LENGTH;
}
private static void readId(InputStream in, String id) throws IOException {
for (int i = 0; i < id.length(); i++) {
if (id.charAt(i) != in.read()) throw new IOException(id + " tag not present");
}
}
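// all RIFF integer fields are little-endian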
private static int readInt(InputStream in) throws IOException {
return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24);
}
private static short readShort(InputStream in) throws IOException {
return (short)(in.read() | (in.read() << 8));
}
/**
* Write a WAVE file header.
* @param out {@link java.io.OutputStream} to receive the header.
* @return number of bytes written.
* @throws IOException
*/
public int write(OutputStream out) throws IOException {
/* RIFF header */
writeId(out, "RIFF");
writeInt(out, 36 + mNumBytes);
writeId(out, "WAVE");
/* fmt chunk */
writeId(out, "fmt ");
writeInt(out, 16);
writeShort(out, mFormat);
writeShort(out, mNumChannels);
writeInt(out, mSampleRate);
writeInt(out, mNumChannels * mSampleRate * mBitsPerSample / 8);
writeShort(out, (short)(mNumChannels * mBitsPerSample / 8));
writeShort(out, mBitsPerSample);
/* data chunk */
writeId(out, "data");
writeInt(out, mNumBytes);
return HEADER_LENGTH;
}
private static void writeId(OutputStream out, String id) throws IOException {
for (int i = 0; i < id.length(); i++) out.write(id.charAt(i));
}
private static void writeInt(OutputStream out, int val) throws IOException {
out.write(val >> 0);
out.write(val >> 8);
out.write(val >> 16);
out.write(val >> 24);
}
private static void writeShort(OutputStream out, short val) throws IOException {
out.write(val >> 0);
out.write(val >> 8);
}
@Override
public String toString() {
return String.format(
"WaveHeader format=%d numChannels=%d sampleRate=%d bitsPerSample=%d numBytes=%d",
mFormat, mNumChannels, mSampleRate, mBitsPerSample, mNumBytes);
}
}
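
A writing sketch for the class above (the output path and PCM buffer are hypothetical):

byte[] pcm = loadPcmSamples();   // hypothetical 16 bit mono pcm at 8000 hz
OutputStream out = new java.io.FileOutputStream("/sdcard/demo.wav");   // hypothetical path
new WaveHeader(WaveHeader.FORMAT_PCM, (short) 1, 8000, (short) 16, pcm.length)
        .write(out);             // 44 byte header first
out.write(pcm);                  // then the audio data
out.close();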

View File

@ -0,0 +1,6 @@
<HTML>
<BODY>
Simple, synchronous SREC speech recognition API.
@hide
</BODY>
</HTML>

View File

@ -0,0 +1,69 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech.tts;
import android.speech.tts.ITtsCallback;
import android.content.Intent;
/**
* AIDL for the TTS Service
* ITts.java is autogenerated from this.
*
* {@hide}
*/
interface ITts {
int setSpeechRate(in String callingApp, in int speechRate);
int setPitch(in String callingApp, in int pitch);
int speak(in String callingApp, in String text, in int queueMode, in String[] params);
boolean isSpeaking();
int stop(in String callingApp);
void addSpeech(in String callingApp, in String text, in String packageName, in int resId);
void addSpeechFile(in String callingApp, in String text, in String filename);
String[] getLanguage();
int isLanguageAvailable(in String language, in String country, in String variant, in String[] params);
int setLanguage(in String callingApp, in String language, in String country, in String variant);
boolean synthesizeToFile(in String callingApp, in String text, in String[] params, in String outputDirectory);
int playEarcon(in String callingApp, in String earcon, in int queueMode, in String[] params);
void addEarcon(in String callingApp, in String earcon, in String packageName, in int resId);
void addEarconFile(in String callingApp, in String earcon, in String filename);
int registerCallback(in String callingApp, ITtsCallback cb);
int unregisterCallback(in String callingApp, ITtsCallback cb);
int playSilence(in String callingApp, in long duration, in int queueMode, in String[] params);
int setEngineByPackageName(in String enginePackageName);
String getDefaultEngine();
boolean areDefaultsEnforced();
}
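
Applications do not bind this interface directly; the public android.speech.tts.TextToSpeech class wraps it. A minimal sketch (field and method names outside the TextToSpeech API are illustrative):

private TextToSpeech mTts;

void startSpeaking(Context context) {
    mTts = new TextToSpeech(context, new TextToSpeech.OnInitListener() {
        public void onInit(int status) {
            if (status == TextToSpeech.SUCCESS) {
                mTts.setSpeechRate(1.0f);                            // -> setSpeechRate()
                mTts.speak("hello", TextToSpeech.QUEUE_FLUSH, null); // -> speak()
            }
        }
    });
}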

View File

@ -0,0 +1,27 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech.tts;
/**
* AIDL for the callback from the TTS Service
* ITtsCallback.java is autogenerated from this.
*
* {@hide}
*/
oneway interface ITtsCallback {
void utteranceCompleted(String utteranceId);
}
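
On the application side this callback surfaces as TextToSpeech.OnUtteranceCompletedListener, keyed by an utterance id supplied with the request; a sketch reusing mTts from the previous example (tag and id strings are illustrative):

mTts.setOnUtteranceCompletedListener(new TextToSpeech.OnUtteranceCompletedListener() {
    public void onUtteranceCompleted(String utteranceId) {
        Log.d("TtsDemo", "completed: " + utteranceId);
    }
});
HashMap<String, String> params = new HashMap<String, String>();
params.put(TextToSpeech.Engine.KEY_PARAM_UTTERANCE_ID, "greeting");
mTts.speak("hello", TextToSpeech.QUEUE_FLUSH, params);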

File diff suppressed because it is too large