M7350v1_en_gpl

This commit is contained in:
T
2024-09-09 08:52:07 +00:00
commit f9cc65cfda
65988 changed files with 26357421 additions and 0 deletions

@@ -0,0 +1,15 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
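# Builds the TtsService application package (signed with the platform certificate).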
LOCAL_MODULE_TAGS := optional
LOCAL_SRC_FILES := $(call all-subdir-java-files)
LOCAL_PACKAGE_NAME := TtsService
LOCAL_CERTIFICATE := platform
LOCAL_PROGUARD_FLAG_FILES := proguard.flags
include $(BUILD_PACKAGE)
include $(call all-makefiles-under,$(LOCAL_PATH))

@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="android.tts">
<application android:label="TTS Service"
android:icon="@drawable/ic_launcher_text_to_speech">
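<!-- The TTS service itself; clients reach it via the android.intent.action.START_TTS_SERVICE action declared below. -->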
<service android:enabled="true"
android:name=".TtsService"
android:label="TTS Service">
<intent-filter>
<action android:name="android.intent.action.START_TTS_SERVICE"/>
<category android:name="android.intent.category.TTS"/>
</intent-filter>
</service>
</application>
<uses-permission android:name="android.permission.INTERNET"/>
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
</manifest>

@@ -0,0 +1,190 @@
Copyright (c) 2009, The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

@@ -0,0 +1,29 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
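# Builds libttssynthproxy, the JNI bridge between the Java SynthProxy class and a TTS engine library loaded at runtime with dlopen().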
LOCAL_SRC_FILES:= \
android_tts_SynthProxy.cpp
LOCAL_C_INCLUDES += \
$(JNI_H_INCLUDE)
LOCAL_SHARED_LIBRARIES := \
libandroid_runtime \
libnativehelper \
libmedia \
libutils \
libcutils
ifeq ($(TARGET_SIMULATOR),true)
LOCAL_LDLIBS += -ldl
else
LOCAL_SHARED_LIBRARIES += libdl
endif
LOCAL_MODULE:= libttssynthproxy
LOCAL_ARM_MODE := arm
include $(BUILD_SHARED_LIBRARY)

@@ -0,0 +1,991 @@
/*
* Copyright (C) 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <unistd.h>
#define LOG_TAG "SynthProxy"
#include <utils/Log.h>
#include <nativehelper/jni.h>
#include <nativehelper/JNIHelp.h>
#include <android_runtime/AndroidRuntime.h>
#include <tts/TtsEngine.h>
#include <media/AudioTrack.h>
#include <math.h>
#include <dlfcn.h>
#define DEFAULT_TTS_RATE 16000
#define DEFAULT_TTS_FORMAT AudioSystem::PCM_16_BIT
#define DEFAULT_TTS_NB_CHANNELS 1
#define DEFAULT_TTS_BUFFERSIZE 2048
// TODO use the TTS stream type when available
#define DEFAULT_TTS_STREAM_TYPE AudioSystem::MUSIC
// EQ + BOOST parameters
#define FILTER_LOWSHELF_ATTENUATION -18.0f // in dB
#define FILTER_TRANSITION_FREQ 1100.0f // in Hz
#define FILTER_SHELF_SLOPE 1.0f // Q
#define FILTER_GAIN 5.5f // linear gain
#define USAGEMODE_PLAY_IMMEDIATELY 0
#define USAGEMODE_WRITE_TO_FILE 1
#define SYNTHPLAYSTATE_IS_STOPPED 0
#define SYNTHPLAYSTATE_IS_PLAYING 1
using namespace android;
// ----------------------------------------------------------------------------
struct fields_t {
jfieldID synthProxyFieldJniData;
jclass synthProxyClass;
jmethodID synthProxyMethodPost;
};
// structure to hold the data that is used each time the TTS engine has synthesized more data
struct afterSynthData_t {
jint jniStorage;
int usageMode;
FILE* outputFile;
AudioSystem::stream_type streamType;
};
// ----------------------------------------------------------------------------
// EQ data
double amp;
double w;
double sinw;
double cosw;
double beta;
double a0, a1, a2, b0, b1, b2;
double m_fa, m_fb, m_fc, m_fd, m_fe;
double x0; // x[n]
double x1; // x[n-1]
double x2; // x[n-2]
double out0;// y[n]
double out1;// y[n-1]
double out2;// y[n-2]
static float fFilterLowshelfAttenuation = FILTER_LOWSHELF_ATTENUATION;
static float fFilterTransitionFreq = FILTER_TRANSITION_FREQ;
static float fFilterShelfSlope = FILTER_SHELF_SLOPE;
static float fFilterGain = FILTER_GAIN;
static bool bUseFilter = false;
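// initializeEQ() derives the coefficients of a single low-shelf biquad filter
// (direct form I). applyFilter() then evaluates, per sample:
//   y[n] = m_fa*x[n] + m_fb*x[n-1] + m_fc*x[n-2] + m_fd*y[n-1] + m_fe*y[n-2]
// shaping content below fFilterTransitionFreq by fFilterLowshelfAttenuation dB
// and scaling the result by fFilterGain.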
void initializeEQ() {
amp = float(pow(10.0, fFilterLowshelfAttenuation / 40.0));
w = 2.0 * M_PI * (fFilterTransitionFreq / DEFAULT_TTS_RATE);
sinw = float(sin(w));
cosw = float(cos(w));
beta = float(sqrt(amp)/fFilterShelfSlope);
// initialize low-shelf parameters
b0 = amp * ((amp+1.0F) - ((amp-1.0F)*cosw) + (beta*sinw));
b1 = 2.0F * amp * ((amp-1.0F) - ((amp+1.0F)*cosw));
b2 = amp * ((amp+1.0F) - ((amp-1.0F)*cosw) - (beta*sinw));
a0 = (amp+1.0F) + ((amp-1.0F)*cosw) + (beta*sinw);
a1 = 2.0F * ((amp-1.0F) + ((amp+1.0F)*cosw));
a2 = -((amp+1.0F) + ((amp-1.0F)*cosw) - (beta*sinw));
m_fa = fFilterGain * b0/a0;
m_fb = fFilterGain * b1/a0;
m_fc = fFilterGain * b2/a0;
m_fd = a1/a0;
m_fe = a2/a0;
}
void initializeFilter() {
x0 = 0.0f;
x1 = 0.0f;
x2 = 0.0f;
out0 = 0.0f;
out1 = 0.0f;
out2 = 0.0f;
}
void applyFilter(int16_t* buffer, size_t sampleCount) {
for (size_t i=0 ; i<sampleCount ; i++) {
x0 = (double) buffer[i];
out0 = (m_fa*x0) + (m_fb*x1) + (m_fc*x2) + (m_fd*out1) + (m_fe*out2);
x2 = x1;
x1 = x0;
out2 = out1;
out1 = out0;
if (out0 > 32767.0f) {
buffer[i] = 32767;
} else if (out0 < -32768.0f) {
buffer[i] = -32768;
} else {
buffer[i] = (int16_t) out0;
}
}
}
// ----------------------------------------------------------------------------
static fields_t javaTTSFields;
// TODO move to synth member once we have multiple simultaneous engines running
static Mutex engineMutex;
// ----------------------------------------------------------------------------
class SynthProxyJniStorage {
public :
jobject tts_ref;
TtsEngine* mNativeSynthInterface;
void* mEngineLibHandle;
AudioTrack* mAudioOut;
int8_t mPlayState;
Mutex mPlayLock;
AudioSystem::stream_type mStreamType;
uint32_t mSampleRate;
uint32_t mAudFormat;
int mNbChannels;
int8_t * mBuffer;
size_t mBufferSize;
SynthProxyJniStorage() {
tts_ref = NULL;
mNativeSynthInterface = NULL;
mEngineLibHandle = NULL;
mAudioOut = NULL;
mPlayState = SYNTHPLAYSTATE_IS_STOPPED;
mStreamType = DEFAULT_TTS_STREAM_TYPE;
mSampleRate = DEFAULT_TTS_RATE;
mAudFormat = DEFAULT_TTS_FORMAT;
mNbChannels = DEFAULT_TTS_NB_CHANNELS;
mBufferSize = DEFAULT_TTS_BUFFERSIZE;
mBuffer = new int8_t[mBufferSize];
memset(mBuffer, 0, mBufferSize);
}
~SynthProxyJniStorage() {
//LOGV("entering ~SynthProxyJniStorage()");
killAudio();
if (mNativeSynthInterface) {
mNativeSynthInterface->shutdown();
mNativeSynthInterface = NULL;
}
if (mEngineLibHandle) {
//LOGE("~SynthProxyJniStorage(): before close library");
int res = dlclose(mEngineLibHandle);
LOGE_IF( res != 0, "~SynthProxyJniStorage(): dlclose returned %d", res);
}
delete [] mBuffer;   // allocated with new int8_t[], so use array delete
}
void killAudio() {
if (mAudioOut) {
mAudioOut->stop();
delete mAudioOut;
mAudioOut = NULL;
}
}
void createAudioOut(AudioSystem::stream_type streamType, uint32_t rate,
AudioSystem::audio_format format, int channel) {
mSampleRate = rate;
mAudFormat = format;
mNbChannels = channel;
mStreamType = streamType;
// retrieve system properties to ensure successful creation of the
// AudioTrack object for playback
int afSampleRate;
if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
afSampleRate = 44100;
}
int afFrameCount;
if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
afFrameCount = 2048;
}
uint32_t afLatency;
if (AudioSystem::getOutputLatency(&afLatency, mStreamType) != NO_ERROR) {
afLatency = 500;
}
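// Size the AudioTrack so it can hold at least one hardware output latency worth of
// audio: afLatency (ms) divided by the duration of one hardware buffer (ms) gives
// the minimum buffer count, and the frame count is rescaled from the hardware
// sample rate to the synthesis rate (with a 4096-frame floor applied below).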
uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
if (minBufCount < 2) minBufCount = 2;
int minFrameCount = (afFrameCount * rate * minBufCount)/afSampleRate;
mPlayLock.lock();
mAudioOut = new AudioTrack(mStreamType, rate, format,
(channel == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
minFrameCount > 4096 ? minFrameCount : 4096,
0, 0, 0, 0); // not using an AudioTrack callback
if (mAudioOut->initCheck() != NO_ERROR) {
LOGE("createAudioOut(): AudioTrack error");
delete mAudioOut;
mAudioOut = NULL;
} else {
//LOGI("AudioTrack OK");
mAudioOut->setVolume(1.0f, 1.0f);
LOGV("AudioTrack ready");
}
mPlayLock.unlock();
}
};
// ----------------------------------------------------------------------------
void prepAudioTrack(SynthProxyJniStorage* pJniData, AudioSystem::stream_type streamType,
uint32_t rate, AudioSystem::audio_format format, int channel) {
// Don't bother creating a new audiotrack object if the current
// object is already initialized with the same audio parameters.
if ( pJniData->mAudioOut &&
(rate == pJniData->mSampleRate) &&
(format == pJniData->mAudFormat) &&
(channel == pJniData->mNbChannels) &&
(streamType == pJniData->mStreamType) ){
return;
}
if (pJniData->mAudioOut){
pJniData->killAudio();
}
pJniData->createAudioOut(streamType, rate, format, channel);
}
// ----------------------------------------------------------------------------
/*
* Callback from TTS engine.
* Directly speaks using AudioTrack or write to file
*/
static tts_callback_status ttsSynthDoneCB(void *& userdata, uint32_t rate,
uint32_t format, int channel,
int8_t *&wav, size_t &bufferSize, tts_synth_status status) {
//LOGV("ttsSynthDoneCallback: %d bytes", bufferSize);
if (userdata == NULL){
LOGE("userdata == NULL");
return TTS_CALLBACK_HALT;
}
afterSynthData_t* pForAfter = (afterSynthData_t*)userdata;
SynthProxyJniStorage* pJniData = (SynthProxyJniStorage*)(pForAfter->jniStorage);
if (pForAfter->usageMode == USAGEMODE_PLAY_IMMEDIATELY){
//LOGV("Direct speech");
if (wav == NULL) {
delete pForAfter;
LOGV("Null: speech has completed");
return TTS_CALLBACK_HALT;
}
if (bufferSize > 0) {
prepAudioTrack(pJniData, pForAfter->streamType, rate, (AudioSystem::audio_format)format, channel);
if (pJniData->mAudioOut) {
pJniData->mPlayLock.lock();
if(pJniData->mAudioOut->stopped()
&& (pJniData->mPlayState == SYNTHPLAYSTATE_IS_PLAYING)) {
pJniData->mAudioOut->start();
}
pJniData->mPlayLock.unlock();
if (bUseFilter) {
applyFilter((int16_t*)wav, bufferSize/2);
}
pJniData->mAudioOut->write(wav, bufferSize);
memset(wav, 0, bufferSize);
//LOGV("AudioTrack wrote: %d bytes", bufferSize);
} else {
LOGE("Can't play, null audiotrack");
}
}
} else if (pForAfter->usageMode == USAGEMODE_WRITE_TO_FILE) {
//LOGV("Save to file");
if (wav == NULL) {
delete pForAfter;
LOGV("Null: speech has completed");
return TTS_CALLBACK_HALT;
}
if (bufferSize > 0){
if (bUseFilter) {
applyFilter((int16_t*)wav, bufferSize/2);
}
fwrite(wav, 1, bufferSize, pForAfter->outputFile);
memset(wav, 0, bufferSize);
}
}
// Future update:
// For sync points in the speech, call back into the SynthProxy class through the
// javaTTSFields.synthProxyMethodPost method to notify the Java layer that playback
// has completed when synthesis is done, or that a marker has been reached.
if (status == TTS_SYNTH_DONE) {
// this struct was allocated in the original android_tts_SynthProxy_speak call,
// all processing matching this call is now done.
LOGV("Speech synthesis done.");
if (pForAfter->usageMode == USAGEMODE_PLAY_IMMEDIATELY) {
// only delete for direct playback. When writing to a file, we still have work to do
// in android_tts_SynthProxy_synthesizeToFile. The struct will be deleted there.
delete pForAfter;
pForAfter = NULL;
}
return TTS_CALLBACK_HALT;
}
// We don't update the wav (output) parameter: the next callback will write to the
// same location, since we've already consumed the data. We do update bufferSize so
// the TTS engine knows how much it may write the next time it calls this function.
bufferSize = pJniData->mBufferSize;
return TTS_CALLBACK_CONTINUE;
}
// ----------------------------------------------------------------------------
static int
android_tts_SynthProxy_setLowShelf(JNIEnv *env, jobject thiz, jboolean applyFilter,
jfloat filterGain, jfloat attenuationInDb, jfloat freqInHz, jfloat slope)
{
int result = TTS_SUCCESS;
bUseFilter = applyFilter;
if (applyFilter) {
fFilterLowshelfAttenuation = attenuationInDb;
fFilterTransitionFreq = freqInHz;
fFilterShelfSlope = slope;
fFilterGain = filterGain;
if (fFilterShelfSlope != 0.0f) {
initializeEQ();
} else {
LOGE("Invalid slope, can't be null");
result = TTS_FAILURE;
}
}
return result;
}
// ----------------------------------------------------------------------------
static int
android_tts_SynthProxy_native_setup(JNIEnv *env, jobject thiz,
jobject weak_this, jstring nativeSoLib, jstring engConfig)
{
int result = TTS_FAILURE;
bUseFilter = false;
SynthProxyJniStorage* pJniStorage = new SynthProxyJniStorage();
prepAudioTrack(pJniStorage,
DEFAULT_TTS_STREAM_TYPE, DEFAULT_TTS_RATE, DEFAULT_TTS_FORMAT, DEFAULT_TTS_NB_CHANNELS);
const char *nativeSoLibNativeString = env->GetStringUTFChars(nativeSoLib, 0);
const char *engConfigString = env->GetStringUTFChars(engConfig, 0);
void *engine_lib_handle = dlopen(nativeSoLibNativeString,
RTLD_NOW | RTLD_LOCAL);
if (engine_lib_handle == NULL) {
LOGE("android_tts_SynthProxy_native_setup(): engine_lib_handle == NULL");
} else {
TtsEngine *(*get_TtsEngine)() =
reinterpret_cast<TtsEngine* (*)()>(dlsym(engine_lib_handle, "getTtsEngine"));
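// The engine library is expected to export a "getTtsEngine" factory function;
// note that the dlsym() result is not checked for NULL before being called below.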
pJniStorage->mNativeSynthInterface = (*get_TtsEngine)();
pJniStorage->mEngineLibHandle = engine_lib_handle;
if (pJniStorage->mNativeSynthInterface) {
Mutex::Autolock l(engineMutex);
pJniStorage->mNativeSynthInterface->init(ttsSynthDoneCB, engConfigString);
}
result = TTS_SUCCESS;
}
// we use a weak reference so the SynthProxy object can be garbage collected.
pJniStorage->tts_ref = env->NewGlobalRef(weak_this);
// save the JNI resources so we can use them (and free them) later
env->SetIntField(thiz, javaTTSFields.synthProxyFieldJniData, (int)pJniStorage);
env->ReleaseStringUTFChars(nativeSoLib, nativeSoLibNativeString);
env->ReleaseStringUTFChars(engConfig, engConfigString);
return result;
}
static void
android_tts_SynthProxy_native_finalize(JNIEnv *env, jobject thiz, jint jniData)
{
//LOGV("entering android_tts_SynthProxy_finalize()");
if (jniData == 0) {
//LOGE("android_tts_SynthProxy_native_finalize(): invalid JNI data");
return;
}
Mutex::Autolock l(engineMutex);
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
env->DeleteGlobalRef(pSynthData->tts_ref);
delete pSynthData;
env->SetIntField(thiz, javaTTSFields.synthProxyFieldJniData, 0);
}
static void
android_tts_SynthProxy_shutdown(JNIEnv *env, jobject thiz, jint jniData)
{
//LOGV("entering android_tts_SynthProxy_shutdown()");
// do everything a call to finalize would
android_tts_SynthProxy_native_finalize(env, thiz, jniData);
}
static int
android_tts_SynthProxy_isLanguageAvailable(JNIEnv *env, jobject thiz, jint jniData,
jstring language, jstring country, jstring variant)
{
int result = TTS_LANG_NOT_SUPPORTED;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_isLanguageAvailable(): invalid JNI data");
return result;
}
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
const char *langNativeString = env->GetStringUTFChars(language, 0);
const char *countryNativeString = env->GetStringUTFChars(country, 0);
const char *variantNativeString = env->GetStringUTFChars(variant, 0);
if (pSynthData->mNativeSynthInterface) {
result = pSynthData->mNativeSynthInterface->isLanguageAvailable(langNativeString,
countryNativeString, variantNativeString);
}
env->ReleaseStringUTFChars(language, langNativeString);
env->ReleaseStringUTFChars(country, countryNativeString);
env->ReleaseStringUTFChars(variant, variantNativeString);
return result;
}
static int
android_tts_SynthProxy_setConfig(JNIEnv *env, jobject thiz, jint jniData, jstring engineConfig)
{
int result = TTS_FAILURE;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_setConfig(): invalid JNI data");
return result;
}
Mutex::Autolock l(engineMutex);
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
const char *engineConfigNativeString = env->GetStringUTFChars(engineConfig, 0);
if (pSynthData->mNativeSynthInterface) {
result = pSynthData->mNativeSynthInterface->setProperty(ANDROID_TTS_ENGINE_PROPERTY_CONFIG,
engineConfigNativeString, strlen(engineConfigNativeString));
}
env->ReleaseStringUTFChars(engineConfig, engineConfigNativeString);
return result;
}
static int
android_tts_SynthProxy_setLanguage(JNIEnv *env, jobject thiz, jint jniData,
jstring language, jstring country, jstring variant)
{
int result = TTS_LANG_NOT_SUPPORTED;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_setLanguage(): invalid JNI data");
return result;
}
Mutex::Autolock l(engineMutex);
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
const char *langNativeString = env->GetStringUTFChars(language, 0);
const char *countryNativeString = env->GetStringUTFChars(country, 0);
const char *variantNativeString = env->GetStringUTFChars(variant, 0);
if (pSynthData->mNativeSynthInterface) {
result = pSynthData->mNativeSynthInterface->setLanguage(langNativeString,
countryNativeString, variantNativeString);
}
env->ReleaseStringUTFChars(language, langNativeString);
env->ReleaseStringUTFChars(country, countryNativeString);
env->ReleaseStringUTFChars(variant, variantNativeString);
return result;
}
static int
android_tts_SynthProxy_loadLanguage(JNIEnv *env, jobject thiz, jint jniData,
jstring language, jstring country, jstring variant)
{
int result = TTS_LANG_NOT_SUPPORTED;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_loadLanguage(): invalid JNI data");
return result;
}
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
const char *langNativeString = env->GetStringUTFChars(language, 0);
const char *countryNativeString = env->GetStringUTFChars(country, 0);
const char *variantNativeString = env->GetStringUTFChars(variant, 0);
if (pSynthData->mNativeSynthInterface) {
result = pSynthData->mNativeSynthInterface->loadLanguage(langNativeString,
countryNativeString, variantNativeString);
}
env->ReleaseStringUTFChars(language, langNativeString);
env->ReleaseStringUTFChars(country, countryNativeString);
env->ReleaseStringUTFChars(variant, variantNativeString);
return result;
}
static int
android_tts_SynthProxy_setSpeechRate(JNIEnv *env, jobject thiz, jint jniData,
jint speechRate)
{
int result = TTS_FAILURE;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_setSpeechRate(): invalid JNI data");
return result;
}
int bufSize = 12;
char buffer [bufSize];
sprintf(buffer, "%d", speechRate);
Mutex::Autolock l(engineMutex);
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
LOGI("setting speech rate to %d", speechRate);
if (pSynthData->mNativeSynthInterface) {
result = pSynthData->mNativeSynthInterface->setProperty("rate", buffer, bufSize);
}
return result;
}
static int
android_tts_SynthProxy_setPitch(JNIEnv *env, jobject thiz, jint jniData,
jint pitch)
{
int result = TTS_FAILURE;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_setPitch(): invalid JNI data");
return result;
}
Mutex::Autolock l(engineMutex);
int bufSize = 12;
char buffer [bufSize];
sprintf(buffer, "%d", pitch);
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
LOGI("setting pitch to %d", pitch);
if (pSynthData->mNativeSynthInterface) {
result = pSynthData->mNativeSynthInterface->setProperty("pitch", buffer, bufSize);
}
return result;
}
static int
android_tts_SynthProxy_synthesizeToFile(JNIEnv *env, jobject thiz, jint jniData,
jstring textJavaString, jstring filenameJavaString)
{
int result = TTS_FAILURE;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_synthesizeToFile(): invalid JNI data");
return result;
}
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
if (!pSynthData->mNativeSynthInterface) {
LOGE("android_tts_SynthProxy_synthesizeToFile(): invalid engine handle");
return result;
}
initializeFilter();
Mutex::Autolock l(engineMutex);
// Retrieve audio parameters before writing the file header
AudioSystem::audio_format encoding = DEFAULT_TTS_FORMAT;
uint32_t rate = DEFAULT_TTS_RATE;
int channels = DEFAULT_TTS_NB_CHANNELS;
pSynthData->mNativeSynthInterface->setAudioFormat(encoding, rate, channels);
if ((encoding != AudioSystem::PCM_16_BIT) && (encoding != AudioSystem::PCM_8_BIT)) {
LOGE("android_tts_SynthProxy_synthesizeToFile(): engine uses invalid format");
return result;
}
const char *filenameNativeString =
env->GetStringUTFChars(filenameJavaString, 0);
const char *textNativeString = env->GetStringUTFChars(textJavaString, 0);
afterSynthData_t* pForAfter = new (afterSynthData_t);
pForAfter->jniStorage = jniData;
pForAfter->usageMode = USAGEMODE_WRITE_TO_FILE;
pForAfter->outputFile = fopen(filenameNativeString, "wb");
if (pForAfter->outputFile == NULL) {
LOGE("android_tts_SynthProxy_synthesizeToFile(): error creating output file");
delete pForAfter;
return result;
}
// Write 44 blank bytes for WAV header, then come back and fill them in
// after we've written the audio data
char header[44];
fwrite(header, 1, 44, pForAfter->outputFile);
unsigned int unique_identifier;
memset(pSynthData->mBuffer, 0, pSynthData->mBufferSize);
result = pSynthData->mNativeSynthInterface->synthesizeText(textNativeString,
pSynthData->mBuffer, pSynthData->mBufferSize, (void *)pForAfter);
long filelen = ftell(pForAfter->outputFile);
int samples = (((int)filelen) - 44) / 2;
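// Standard 44-byte PCM WAV header layout:
//   bytes 0-11  : "RIFF", chunk size (file length - 8), "WAVE"
//   bytes 12-35 : "fmt " subchunk (16-byte PCM block: format tag = 1, channels,
//                 sample rate, byte rate, block align, bits per sample)
//   bytes 36-43 : "data" followed by the data size in bytes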
header[0] = 'R';
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
((uint32_t *)(&header[4]))[0] = filelen - 8;
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f';
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
((uint32_t *)(&header[16]))[0] = 16; // size of fmt
int sampleSizeInByte = (encoding == AudioSystem::PCM_16_BIT ? 2 : 1);
((unsigned short *)(&header[20]))[0] = 1; // format
((unsigned short *)(&header[22]))[0] = channels; // channels
((uint32_t *)(&header[24]))[0] = rate; // samplerate
((uint32_t *)(&header[28]))[0] = rate * sampleSizeInByte * channels;// byterate
((unsigned short *)(&header[32]))[0] = sampleSizeInByte * channels; // block align
((unsigned short *)(&header[34]))[0] = sampleSizeInByte * 8; // bits per sample
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
((uint32_t *)(&header[40]))[0] = samples * 2; // size of data
// Skip back to the beginning and rewrite the header
fseek(pForAfter->outputFile, 0, SEEK_SET);
fwrite(header, 1, 44, pForAfter->outputFile);
fflush(pForAfter->outputFile);
fclose(pForAfter->outputFile);
delete pForAfter;
pForAfter = NULL;
env->ReleaseStringUTFChars(textJavaString, textNativeString);
env->ReleaseStringUTFChars(filenameJavaString, filenameNativeString);
return result;
}
static int
android_tts_SynthProxy_speak(JNIEnv *env, jobject thiz, jint jniData,
jstring textJavaString, jint javaStreamType)
{
int result = TTS_FAILURE;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_speak(): invalid JNI data");
return result;
}
initializeFilter();
Mutex::Autolock l(engineMutex);
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
pSynthData->mPlayLock.lock();
pSynthData->mPlayState = SYNTHPLAYSTATE_IS_PLAYING;
pSynthData->mPlayLock.unlock();
afterSynthData_t* pForAfter = new (afterSynthData_t);
pForAfter->jniStorage = jniData;
pForAfter->usageMode = USAGEMODE_PLAY_IMMEDIATELY;
pForAfter->streamType = (AudioSystem::stream_type) javaStreamType;
if (pSynthData->mNativeSynthInterface) {
const char *textNativeString = env->GetStringUTFChars(textJavaString, 0);
memset(pSynthData->mBuffer, 0, pSynthData->mBufferSize);
result = pSynthData->mNativeSynthInterface->synthesizeText(textNativeString,
pSynthData->mBuffer, pSynthData->mBufferSize, (void *)pForAfter);
env->ReleaseStringUTFChars(textJavaString, textNativeString);
}
return result;
}
static int
android_tts_SynthProxy_stop(JNIEnv *env, jobject thiz, jint jniData)
{
int result = TTS_FAILURE;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_stop(): invalid JNI data");
return result;
}
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
pSynthData->mPlayLock.lock();
pSynthData->mPlayState = SYNTHPLAYSTATE_IS_STOPPED;
if (pSynthData->mAudioOut) {
pSynthData->mAudioOut->stop();
}
pSynthData->mPlayLock.unlock();
if (pSynthData->mNativeSynthInterface) {
result = pSynthData->mNativeSynthInterface->stop();
}
return result;
}
static int
android_tts_SynthProxy_stopSync(JNIEnv *env, jobject thiz, jint jniData)
{
int result = TTS_FAILURE;
if (jniData == 0) {
LOGE("android_tts_SynthProxy_stop(): invalid JNI data");
return result;
}
// perform a regular stop
result = android_tts_SynthProxy_stop(env, thiz, jniData);
// then wait until the engine has released the engine mutex, which protects
// the synthesizer resources.
engineMutex.lock();
engineMutex.unlock();
return result;
}
static jobjectArray
android_tts_SynthProxy_getLanguage(JNIEnv *env, jobject thiz, jint jniData)
{
if (jniData == 0) {
LOGE("android_tts_SynthProxy_getLanguage(): invalid JNI data");
return NULL;
}
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
if (pSynthData->mNativeSynthInterface) {
size_t bufSize = 100;
char lang[bufSize];
char country[bufSize];
char variant[bufSize];
memset(lang, 0, bufSize);
memset(country, 0, bufSize);
memset(variant, 0, bufSize);
jobjectArray retLocale = (jobjectArray)env->NewObjectArray(3,
env->FindClass("java/lang/String"), env->NewStringUTF(""));
pSynthData->mNativeSynthInterface->getLanguage(lang, country, variant);
env->SetObjectArrayElement(retLocale, 0, env->NewStringUTF(lang));
env->SetObjectArrayElement(retLocale, 1, env->NewStringUTF(country));
env->SetObjectArrayElement(retLocale, 2, env->NewStringUTF(variant));
return retLocale;
} else {
return NULL;
}
}
JNIEXPORT int JNICALL
android_tts_SynthProxy_getRate(JNIEnv *env, jobject thiz, jint jniData)
{
if (jniData == 0) {
LOGE("android_tts_SynthProxy_getRate(): invalid JNI data");
return 0;
}
SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
size_t bufSize = 100;
char buf[bufSize];
memset(buf, 0, bufSize);
// TODO check return codes
if (pSynthData->mNativeSynthInterface) {
pSynthData->mNativeSynthInterface->getProperty("rate", buf, &bufSize);
}
return atoi(buf);
}
// Dalvik VM type signatures
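// Each entry maps a Java native method name to its C implementation. The middle
// string is the JNI type signature, e.g. "(I)I" takes an int and returns an int,
// and "Ljava/lang/String;" denotes java.lang.String.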
static JNINativeMethod gMethods[] = {
{ "native_stop",
"(I)I",
(void*)android_tts_SynthProxy_stop
},
{ "native_stopSync",
"(I)I",
(void*)android_tts_SynthProxy_stopSync
},
{ "native_speak",
"(ILjava/lang/String;I)I",
(void*)android_tts_SynthProxy_speak
},
{ "native_synthesizeToFile",
"(ILjava/lang/String;Ljava/lang/String;)I",
(void*)android_tts_SynthProxy_synthesizeToFile
},
{ "native_isLanguageAvailable",
"(ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)I",
(void*)android_tts_SynthProxy_isLanguageAvailable
},
{ "native_setConfig",
"(ILjava/lang/String;)I",
(void*)android_tts_SynthProxy_setConfig
},
{ "native_setLanguage",
"(ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)I",
(void*)android_tts_SynthProxy_setLanguage
},
{ "native_loadLanguage",
"(ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)I",
(void*)android_tts_SynthProxy_loadLanguage
},
{ "native_setSpeechRate",
"(II)I",
(void*)android_tts_SynthProxy_setSpeechRate
},
{ "native_setPitch",
"(II)I",
(void*)android_tts_SynthProxy_setPitch
},
{ "native_getLanguage",
"(I)[Ljava/lang/String;",
(void*)android_tts_SynthProxy_getLanguage
},
{ "native_getRate",
"(I)I",
(void*)android_tts_SynthProxy_getRate
},
{ "native_shutdown",
"(I)V",
(void*)android_tts_SynthProxy_shutdown
},
{ "native_setup",
"(Ljava/lang/Object;Ljava/lang/String;Ljava/lang/String;)I",
(void*)android_tts_SynthProxy_native_setup
},
{ "native_setLowShelf",
"(ZFFFF)I",
(void*)android_tts_SynthProxy_setLowShelf
},
{ "native_finalize",
"(I)V",
(void*)android_tts_SynthProxy_native_finalize
}
};
#define SP_JNIDATA_FIELD_NAME "mJniData"
#define SP_POSTSPEECHSYNTHESIZED_METHOD_NAME "postNativeSpeechSynthesizedInJava"
static const char* const kClassPathName = "android/tts/SynthProxy";
jint JNI_OnLoad(JavaVM* vm, void* reserved)
{
JNIEnv* env = NULL;
jint result = -1;
jclass clazz;
if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
LOGE("ERROR: GetEnv failed\n");
goto bail;
}
assert(env != NULL);
clazz = env->FindClass(kClassPathName);
if (clazz == NULL) {
LOGE("Can't find %s", kClassPathName);
goto bail;
}
javaTTSFields.synthProxyClass = clazz;
javaTTSFields.synthProxyFieldJniData = NULL;
javaTTSFields.synthProxyMethodPost = NULL;
javaTTSFields.synthProxyFieldJniData = env->GetFieldID(clazz,
SP_JNIDATA_FIELD_NAME, "I");
if (javaTTSFields.synthProxyFieldJniData == NULL) {
LOGE("Can't find %s.%s field", kClassPathName, SP_JNIDATA_FIELD_NAME);
goto bail;
}
javaTTSFields.synthProxyMethodPost = env->GetStaticMethodID(clazz,
SP_POSTSPEECHSYNTHESIZED_METHOD_NAME, "(Ljava/lang/Object;II)V");
if (javaTTSFields.synthProxyMethodPost == NULL) {
LOGE("Can't find %s.%s method", kClassPathName, SP_POSTSPEECHSYNTHESIZED_METHOD_NAME);
goto bail;
}
if (jniRegisterNativeMethods(
env, kClassPathName, gMethods, NELEM(gMethods)) < 0)
goto bail;
/* success -- return valid version number */
result = JNI_VERSION_1_4;
bail:
return result;
}

@@ -0,0 +1,5 @@
-keep class android.tts.SynthProxy {
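# mJniData is accessed from native code via GetFieldID("mJniData", "I"), so it must not be renamed or stripped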
int mJniData;
# keep all declarations for native methods
<methods>;
}

Binary file not shown (new image, 6.8 KiB).

Binary file not shown (new image, 6.6 KiB).

@@ -0,0 +1,235 @@
/*
* Copyright (C) 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package android.tts;
import android.media.AudioManager;
import android.media.AudioSystem;
import android.util.Log;
import java.lang.ref.WeakReference;
/**
* @hide
*
* The SynthProxy class provides a high-level API to create and play
* synthesized speech. This class is used internally to talk to a native
* TTS library that implements the interface defined in
* frameworks/base/include/tts/TtsEngine.h
*
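* A minimal usage sketch (the engine library path below is purely illustrative):
* <pre>
*   SynthProxy proxy = new SynthProxy("/system/lib/libttspico.so", "");
*   proxy.setLanguage("eng", "USA", "");
*   proxy.speak("Hello, world", AudioManager.STREAM_MUSIC);
*   proxy.shutdown();
* </pre>
*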
*/
@SuppressWarnings("unused")
public class SynthProxy {
// Default parameters of a filter to be applied when using the Pico engine.
// Such a huge filter gain is justified by how much energy in the low frequencies is "wasted" at
// the output of the synthesis. The low shelving filter removes it, leaving room for
// amplification.
private final static float PICO_FILTER_GAIN = 5.0f; // linear gain
private final static float PICO_FILTER_LOWSHELF_ATTENUATION = -18.0f; // in dB
private final static float PICO_FILTER_TRANSITION_FREQ = 1100.0f; // in Hz
private final static float PICO_FILTER_SHELF_SLOPE = 1.0f; // Q
//
// External API
//
/**
* Constructor; pass the location of the native TTS .so to use.
*/
public SynthProxy(String nativeSoLib, String engineConfig) {
boolean applyFilter = nativeSoLib.toLowerCase().contains("pico");
Log.v(TtsService.SERVICE_TAG, "About to load "+ nativeSoLib + ", applyFilter="+applyFilter);
native_setup(new WeakReference<SynthProxy>(this), nativeSoLib, engineConfig);
native_setLowShelf(applyFilter, PICO_FILTER_GAIN, PICO_FILTER_LOWSHELF_ATTENUATION,
PICO_FILTER_TRANSITION_FREQ, PICO_FILTER_SHELF_SLOPE);
}
/**
* Stops and clears the AudioTrack.
*/
public int stop() {
return native_stop(mJniData);
}
/**
* Synchronous stop of the synthesizer. This method returns when the synth
* has completed the stop procedure and doesn't use any of the resources it
* was using while synthesizing.
*
* @return {@link android.speech.tts.TextToSpeech#SUCCESS} or
* {@link android.speech.tts.TextToSpeech#ERROR}
*/
public int stopSync() {
return native_stopSync(mJniData);
}
/**
* Synthesize speech and speak it directly using AudioTrack.
*/
public int speak(String text, int streamType) {
if ((streamType > -1) && (streamType < AudioSystem.getNumStreamTypes())) {
return native_speak(mJniData, text, streamType);
} else {
Log.e("SynthProxy", "Trying to speak with invalid stream type " + streamType);
return native_speak(mJniData, text, AudioManager.STREAM_MUSIC);
}
}
/**
* Synthesize speech to a file. The current implementation writes a valid
* WAV file to the given path, assuming it is writable. Something like
* "/sdcard/???.wav" is recommended.
*/
public int synthesizeToFile(String text, String filename) {
return native_synthesizeToFile(mJniData, text, filename);
}
/**
* Queries for language support.
* Return codes are defined in android.speech.tts.TextToSpeech
*/
public int isLanguageAvailable(String language, String country, String variant) {
return native_isLanguageAvailable(mJniData, language, country, variant);
}
/**
* Updates the engine configuration.
*/
public int setConfig(String engineConfig) {
return native_setConfig(mJniData, engineConfig);
}
/**
* Sets the language.
*/
public int setLanguage(String language, String country, String variant) {
return native_setLanguage(mJniData, language, country, variant);
}
/**
* Loads the language: it is not set as the current language, but is prepared for later use.
*/
public int loadLanguage(String language, String country, String variant) {
return native_loadLanguage(mJniData, language, country, variant);
}
/**
* Sets the speech rate.
*/
public final int setSpeechRate(int speechRate) {
return native_setSpeechRate(mJniData, speechRate);
}
/**
* Sets the pitch of the synthesized voice.
*/
public final int setPitch(int pitch) {
return native_setPitch(mJniData, pitch);
}
/**
* Returns the currently set language, country and variant information.
*/
public String[] getLanguage() {
return native_getLanguage(mJniData);
}
/**
* Gets the currently set rate.
*/
public int getRate() {
return native_getRate(mJniData);
}
/**
* Shuts down the native synthesizer.
*/
public void shutdown() {
native_shutdown(mJniData);
}
//
// Internal
//
protected void finalize() {
native_finalize(mJniData);
mJniData = 0;
}
static {
System.loadLibrary("ttssynthproxy");
}
private final static String TAG = "SynthProxy";
/**
* Accessed by native methods
*/
private int mJniData = 0;
private native final int native_setup(Object weak_this, String nativeSoLib,
String engineConfig);
private native final int native_setLowShelf(boolean applyFilter, float filterGain,
float attenuationInDb, float freqInHz, float slope);
private native final void native_finalize(int jniData);
private native final int native_stop(int jniData);
private native final int native_stopSync(int jniData);
private native final int native_speak(int jniData, String text, int streamType);
private native final int native_synthesizeToFile(int jniData, String text, String filename);
private native final int native_isLanguageAvailable(int jniData, String language,
String country, String variant);
private native final int native_setLanguage(int jniData, String language, String country,
String variant);
private native final int native_loadLanguage(int jniData, String language, String country,
String variant);
private native final int native_setConfig(int jniData, String engineConfig);
private native final int native_setSpeechRate(int jniData, int speechRate);
private native final int native_setPitch(int jniData, int pitch);
private native final String[] native_getLanguage(int jniData);
private native final int native_getRate(int jniData);
private native final void native_shutdown(int jniData);
/**
* Callback from the C layer
*/
@SuppressWarnings("unused")
private static void postNativeSpeechSynthesizedInJava(Object tts_ref,
int bufferPointer, int bufferSize) {
Log.i("TTS plugin debug", "bufferPointer: " + bufferPointer
+ " bufferSize: " + bufferSize);
SynthProxy nativeTTS = (SynthProxy)((WeakReference)tts_ref).get();
// TODO notify TTS service of synthesis/playback completion,
// method definition to be changed.
}
}

File diff suppressed because it is too large.