/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.speech;

import android.Manifest;
import android.annotation.CallbackExecutor;
import android.annotation.IntDef;
import android.annotation.MainThread;
import android.annotation.NonNull;
import android.annotation.Nullable;
import android.annotation.RequiresPermission;
import android.annotation.TestApi;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.pm.ResolveInfo;
import android.os.Binder;
import android.os.Bundle;
import android.os.Handler;
import android.os.IBinder;
import android.os.Looper;
import android.os.Message;
import android.os.RemoteException;
import android.os.ServiceManager;
import android.provider.Settings;
import android.text.TextUtils;
import android.util.Log;
import android.util.Slog;

import com.android.internal.R;

import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.List;
import java.util.Objects;
import java.util.Queue;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;

/**
 * This class provides access to the speech recognition service. This service allows access to the
 * speech recognizer. Do not instantiate this class directly; instead, call
 * {@link SpeechRecognizer#createSpeechRecognizer(Context)} or
 * {@link SpeechRecognizer#createOnDeviceSpeechRecognizer(Context)}. This class's methods must be
 * invoked only from the main application thread.
 *
 * <p>The implementation of this API is likely to stream audio to remote servers to perform speech
 * recognition. As such this API is not intended to be used for continuous recognition, which would
 * consume a significant amount of battery and bandwidth.
 *
 * <p>Please note that the application must have {@link android.Manifest.permission#RECORD_AUDIO}
 * permission to use this class.
 */
public class SpeechRecognizer {
    /** DEBUG value to enable verbose debug prints */
    private static final boolean DBG = false;

    /** Log messages identifier */
    private static final String TAG = "SpeechRecognizer";

    /**
     * Key used to retrieve an {@code ArrayList<String>} from the {@link Bundle} passed to the
     * {@link RecognitionListener#onResults(Bundle)} and
     * {@link RecognitionListener#onPartialResults(Bundle)} methods. These strings are the possible
     * recognition results, where the first element is the most likely candidate.
     */
    public static final String RESULTS_RECOGNITION = "results_recognition";

    /**
     * Key used to retrieve a float array from the {@link Bundle} passed to the
     * {@link RecognitionListener#onResults(Bundle)} and
     * {@link RecognitionListener#onPartialResults(Bundle)} methods. The array should be
     * the same size as the ArrayList provided in {@link #RESULTS_RECOGNITION}, and should contain
     * values ranging from 0.0 to 1.0, or -1 to represent an unavailable confidence score.
     * <p>
     * Confidence values close to 1.0 indicate high confidence (the speech recognizer is confident
     * that the recognition result is correct), while values close to 0.0 indicate low confidence.
     * <p>
     * This value is optional and might not be provided.
     */
    public static final String CONFIDENCE_SCORES = "confidence_scores";

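    // Illustrative sketch (not part of this class): how a client's RecognitionListener might read
    // the two result keys documented above. The variable names are hypothetical, and the bundle
    // entries other than RESULTS_RECOGNITION may be absent.
    //
    //     @Override
    //     public void onResults(Bundle results) {
    //         ArrayList<String> hypotheses =
    //                 results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    //         float[] scores = results.getFloatArray(SpeechRecognizer.CONFIDENCE_SCORES);
    //         if (hypotheses != null && !hypotheses.isEmpty()) {
    //             // hypotheses.get(0) is the most likely candidate; scores may be null, or hold
    //             // -1 for an entry whose confidence is unavailable.
    //             String best = hypotheses.get(0);
    //         }
    //     }
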
    /**
     * Key used to retrieve an ArrayList<{@link AlternativeSpans}> from the {@link Bundle}
     * passed to the {@link RecognitionListener#onResults(Bundle)} and
     * {@link RecognitionListener#onPartialResults(Bundle)} methods. The list should be the same
     * size as the ArrayList provided in {@link #RESULTS_RECOGNITION}.
     *
     * <p> A single {@link SpeechRecognizer} result is represented as a {@link String}. For a
     * specific span (substring) of the originally recognized result string the recognizer provides
     * a list of alternative hypotheses in the form of an {@link AlternativeSpan} object.
     * Alternatives for different spans of a result string are listed in an {@link AlternativeSpans}
     * object. Each item from the ArrayList retrieved by this key corresponds to a single result
     * string provided in {@link #RESULTS_RECOGNITION}.
     *
     * <p> This value is optional and might not be provided.
     */
    public static final String RESULTS_ALTERNATIVES = "results_alternatives";

    /**
     * Key used to receive an ArrayList<{@link RecognitionPart}> object from the
     * {@link Bundle} passed to the {@link RecognitionListener#onResults(Bundle)} and
     * {@link RecognitionListener#onSegmentResults(Bundle)} methods.
     *
     * <p> A single {@link SpeechRecognizer} result is represented as a {@link String}. Each word of
     * the resulting String, as well as any potential adjacent punctuation, is represented by a
     * {@link RecognitionPart} item from the ArrayList retrieved by this key.
     */
    public static final String RECOGNITION_PARTS = "recognition_parts";

    /**
     * Key used to retrieve a {@link String} representation of the IETF language tag (as defined by
     * BCP 47, e.g., "en-US", "de-DE") of the detected language of the most recent audio chunk.
     *
     * <p> This info is returned to the client in the {@link Bundle} passed to
     * {@link RecognitionListener#onLanguageDetection(Bundle)} only if
     * {@link RecognizerIntent#EXTRA_ENABLE_LANGUAGE_DETECTION} is set. Additionally, if
     * {@link RecognizerIntent#EXTRA_LANGUAGE_DETECTION_ALLOWED_LANGUAGES} are listed,
     * the detected language is constrained to be one from the list.
     */
    public static final String DETECTED_LANGUAGE = "detected_language";

    /**
     * Key used to retrieve the level of confidence of the detected language
     * of the most recent audio chunk,
     * represented by an {@code int} value prefixed by {@code LANGUAGE_DETECTION_CONFIDENCE_LEVEL_}.
     *
     * <p> This info is returned to the client in the {@link Bundle} passed to
     * {@link RecognitionListener#onLanguageDetection(Bundle)} only if
     * {@link RecognizerIntent#EXTRA_ENABLE_LANGUAGE_DETECTION} is set.
     */
    public static final String LANGUAGE_DETECTION_CONFIDENCE_LEVEL =
            "language_detection_confidence_level";

    /**
     * The level of language detection confidence.
     *
     * @hide
     */
    @Documented
    @Retention(RetentionPolicy.SOURCE)
    @IntDef(prefix = {"LANGUAGE_DETECTION_CONFIDENCE_LEVEL_"}, value = {
            LANGUAGE_DETECTION_CONFIDENCE_LEVEL_UNKNOWN,
            LANGUAGE_DETECTION_CONFIDENCE_LEVEL_NOT_CONFIDENT,
            LANGUAGE_DETECTION_CONFIDENCE_LEVEL_CONFIDENT,
            LANGUAGE_DETECTION_CONFIDENCE_LEVEL_HIGHLY_CONFIDENT
    })
    public @interface LanguageDetectionConfidenceLevel {}

    public static final int LANGUAGE_DETECTION_CONFIDENCE_LEVEL_UNKNOWN = 0;
    public static final int LANGUAGE_DETECTION_CONFIDENCE_LEVEL_NOT_CONFIDENT = 1;
    public static final int LANGUAGE_DETECTION_CONFIDENCE_LEVEL_CONFIDENT = 2;
    public static final int LANGUAGE_DETECTION_CONFIDENCE_LEVEL_HIGHLY_CONFIDENT = 3;

    /**
     * Key used to retrieve an ArrayList<{@link String}> containing representations of the
     * IETF language tags (as defined by BCP 47, e.g., "en-US", "en-GB") denoting the alternative
     * locales for the same language retrieved by the key {@link #DETECTED_LANGUAGE}.
     *
     * <p> This info is returned to the client in the {@link Bundle} passed to
     * {@link RecognitionListener#onLanguageDetection(Bundle)} only if
     * {@link RecognizerIntent#EXTRA_ENABLE_LANGUAGE_DETECTION} is set.
     */
    public static final String TOP_LOCALE_ALTERNATIVES = "top_locale_alternatives";

    /**
     * Key used to retrieve the result of the language switch of the most recent audio chunk,
     * represented by an {@code int} value prefixed by {@code LANGUAGE_SWITCH_}.
     *
     * <p> This info is returned to the client in the {@link Bundle} passed to the
     * {@link RecognitionListener#onLanguageDetection(Bundle)} only if
     * {@link RecognizerIntent#EXTRA_ENABLE_LANGUAGE_SWITCH} is set.
     */
    public static final String LANGUAGE_SWITCH_RESULT = "language_switch_result";

    /**
     * The result of the language switch.
     *
     * @hide
     */
    @Documented
    @Retention(RetentionPolicy.SOURCE)
    @IntDef(prefix = {"LANGUAGE_SWITCH_RESULT_"}, value = {
            LANGUAGE_SWITCH_RESULT_NOT_ATTEMPTED,
            LANGUAGE_SWITCH_RESULT_SUCCEEDED,
            LANGUAGE_SWITCH_RESULT_FAILED,
            LANGUAGE_SWITCH_RESULT_SKIPPED_NO_MODEL
    })
    public @interface LanguageSwitchResult {}

    /** Switch not attempted. */
    public static final int LANGUAGE_SWITCH_RESULT_NOT_ATTEMPTED = 0;

    /** Switch attempted and succeeded. */
    public static final int LANGUAGE_SWITCH_RESULT_SUCCEEDED = 1;

    /** Switch attempted and failed. */
    public static final int LANGUAGE_SWITCH_RESULT_FAILED = 2;

    /**
     * Switch skipped because the language model is missing
     * or the language is not allowlisted for auto switch.
     */
    public static final int LANGUAGE_SWITCH_RESULT_SKIPPED_NO_MODEL = 3;

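    // Illustrative sketch (not part of this class): reading the language-detection keys above from
    // the Bundle delivered to RecognitionListener#onLanguageDetection(Bundle). Variable names are
    // hypothetical; the callback only fires when the corresponding RecognizerIntent extras are set.
    //
    //     @Override
    //     public void onLanguageDetection(Bundle results) {
    //         String language = results.getString(SpeechRecognizer.DETECTED_LANGUAGE);
    //         int confidence = results.getInt(
    //                 SpeechRecognizer.LANGUAGE_DETECTION_CONFIDENCE_LEVEL,
    //                 SpeechRecognizer.LANGUAGE_DETECTION_CONFIDENCE_LEVEL_UNKNOWN);
    //         int switchResult = results.getInt(
    //                 SpeechRecognizer.LANGUAGE_SWITCH_RESULT,
    //                 SpeechRecognizer.LANGUAGE_SWITCH_RESULT_NOT_ATTEMPTED);
    //     }
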
    /**
     * The reason speech recognition failed.
     *
     * @hide
     */
    @Documented
    @Retention(RetentionPolicy.SOURCE)
    @IntDef(prefix = {"ERROR_"}, value = {
            ERROR_NETWORK_TIMEOUT,
            ERROR_NETWORK,
            ERROR_AUDIO,
            ERROR_SERVER,
            ERROR_CLIENT,
            ERROR_SPEECH_TIMEOUT,
            ERROR_NO_MATCH,
            ERROR_RECOGNIZER_BUSY,
            ERROR_INSUFFICIENT_PERMISSIONS,
            ERROR_TOO_MANY_REQUESTS,
            ERROR_SERVER_DISCONNECTED,
            ERROR_LANGUAGE_NOT_SUPPORTED,
            ERROR_LANGUAGE_UNAVAILABLE,
            ERROR_CANNOT_CHECK_SUPPORT,
            ERROR_CANNOT_LISTEN_TO_DOWNLOAD_EVENTS,
    })
    public @interface RecognitionError {}

    /** Network operation timed out. */
    public static final int ERROR_NETWORK_TIMEOUT = 1;

    /** Other network related errors. */
    public static final int ERROR_NETWORK = 2;

    /** Audio recording error. */
    public static final int ERROR_AUDIO = 3;

    /** Server sends error status. */
    public static final int ERROR_SERVER = 4;

    /** Other client side errors. */
    public static final int ERROR_CLIENT = 5;

    /** No speech input. */
    public static final int ERROR_SPEECH_TIMEOUT = 6;

    /** No recognition result matched. */
    public static final int ERROR_NO_MATCH = 7;

    /** RecognitionService busy. */
    public static final int ERROR_RECOGNIZER_BUSY = 8;

    /** Insufficient permissions. */
    public static final int ERROR_INSUFFICIENT_PERMISSIONS = 9;

    /** Too many requests from the same client. */
    public static final int ERROR_TOO_MANY_REQUESTS = 10;

    /** Server has been disconnected, e.g. because the app has crashed. */
    public static final int ERROR_SERVER_DISCONNECTED = 11;

    /** Requested language is not available to be used with the current recognizer. */
    public static final int ERROR_LANGUAGE_NOT_SUPPORTED = 12;

    /** Requested language is supported, but not available currently (e.g. not downloaded yet). */
    public static final int ERROR_LANGUAGE_UNAVAILABLE = 13;

    /** The service does not allow checking for support. */
    public static final int ERROR_CANNOT_CHECK_SUPPORT = 14;

    /** The service does not support listening to model download events. */
    public static final int ERROR_CANNOT_LISTEN_TO_DOWNLOAD_EVENTS = 15;

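    // Illustrative sketch (not part of this class): a RecognitionListener#onError(int) handler
    // that distinguishes a few of the codes above. The handling shown is a hypothetical policy,
    // not behavior mandated by the API.
    //
    //     @Override
    //     public void onError(int error) {
    //         switch (error) {
    //             case SpeechRecognizer.ERROR_NO_MATCH:
    //             case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
    //                 // Nothing was recognized; prompt the user to try again.
    //                 break;
    //             case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
    //                 // RECORD_AUDIO is missing; request the permission before retrying.
    //                 break;
    //             case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
    //                 // A previous session is still active; retry after onResults/onError.
    //                 break;
    //             default:
    //                 // Treat the remaining codes as non-recoverable for this session.
    //         }
    //     }
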
    /** action codes */
    private static final int MSG_START = 1;
    private static final int MSG_STOP = 2;
    private static final int MSG_CANCEL = 3;
    private static final int MSG_CHANGE_LISTENER = 4;
    private static final int MSG_SET_TEMPORARY_ON_DEVICE_COMPONENT = 5;
    private static final int MSG_CHECK_RECOGNITION_SUPPORT = 6;
    private static final int MSG_TRIGGER_MODEL_DOWNLOAD = 7;

    /** The actual RecognitionService endpoint */
    private IRecognitionService mService;

    /** Context with which the manager was created */
    private final Context mContext;

    /** Component to direct service intent to */
    private final ComponentName mServiceComponent;

    /** Whether to use on-device speech recognizer. */
    private final boolean mOnDevice;

    private IRecognitionServiceManager mManagerService;

    /** Handler that will execute the main tasks */
    private Handler mHandler = new Handler(Looper.getMainLooper()) {

        @Override
        public void handleMessage(Message msg) {
            switch (msg.what) {
                case MSG_START:
                    handleStartListening((Intent) msg.obj);
                    break;
                case MSG_STOP:
                    handleStopMessage();
                    break;
                case MSG_CANCEL:
                    handleCancelMessage();
                    break;
                case MSG_CHANGE_LISTENER:
                    handleChangeListener((RecognitionListener) msg.obj);
                    break;
                case MSG_SET_TEMPORARY_ON_DEVICE_COMPONENT:
                    handleSetTemporaryComponent((ComponentName) msg.obj);
                    break;
                case MSG_CHECK_RECOGNITION_SUPPORT:
                    CheckRecognitionSupportArgs args = (CheckRecognitionSupportArgs) msg.obj;
                    handleCheckRecognitionSupport(
                            args.mIntent, args.mCallbackExecutor, args.mCallback);
                    break;
                case MSG_TRIGGER_MODEL_DOWNLOAD:
                    ModelDownloadListenerArgs modelDownloadListenerArgs =
                            (ModelDownloadListenerArgs) msg.obj;
                    handleTriggerModelDownload(
                            modelDownloadListenerArgs.mIntent,
                            modelDownloadListenerArgs.mExecutor,
                            modelDownloadListenerArgs.mModelDownloadListener);
                    break;
            }
        }
    };

    /**
     * Temporary queue, saving the messages until the connection is established; afterwards,
     * only mHandler will receive the messages
     */
    private final Queue<Message> mPendingTasks = new LinkedBlockingQueue<>();

    /** The Listener that will receive all the callbacks */
    private final InternalRecognitionListener mListener = new InternalRecognitionListener();

    private final IBinder mClientToken = new Binder();

    /**
     * The right way to create a {@code SpeechRecognizer} is by using the
     * {@link #createSpeechRecognizer} static factory method
     */
    private SpeechRecognizer(final Context context, final ComponentName serviceComponent) {
        mContext = context;
        mServiceComponent = serviceComponent;
        mOnDevice = false;
    }

    /**
     * The right way to create a {@code SpeechRecognizer} is by using the
     * {@link #createOnDeviceSpeechRecognizer} static factory method
     */
    private SpeechRecognizer(final Context context, boolean onDevice) {
        mContext = context;
        mServiceComponent = null;
        mOnDevice = onDevice;
    }

    /**
     * Checks whether a speech recognition service is available on the system. If this method
     * returns {@code false}, {@link SpeechRecognizer#createSpeechRecognizer(Context)} will
     * fail.
     *
     * @param context with which {@code SpeechRecognizer} will be created
     * @return {@code true} if recognition is available, {@code false} otherwise
     */
    public static boolean isRecognitionAvailable(@NonNull final Context context) {
        // TODO(b/176578753): make sure this works well with system speech recognizers.
        final List<ResolveInfo> list = context.getPackageManager().queryIntentServices(
                new Intent(RecognitionService.SERVICE_INTERFACE), 0);
        return list != null && list.size() != 0;
    }

404 * 405 * @param context with which on-device {@code SpeechRecognizer} will be created 406 * @return {@code true} if on-device recognition is available, {@code false} otherwise 407 */ isOnDeviceRecognitionAvailable(@onNull final Context context)408 public static boolean isOnDeviceRecognitionAvailable(@NonNull final Context context) { 409 ComponentName componentName = 410 ComponentName.unflattenFromString( 411 context.getString(R.string.config_defaultOnDeviceSpeechRecognitionService)); 412 return componentName != null; 413 } 414 415 /** 416 * Factory method to create a new {@code SpeechRecognizer}. Please note that 417 * {@link #setRecognitionListener(RecognitionListener)} should be called before dispatching any 418 * command to the created {@code SpeechRecognizer}, otherwise no notifications will be 419 * received. 420 * 421 * <p>For apps targeting Android 11 (API level 30) interaction with a speech recognition 422 * service requires <queries> element to be added to the manifest file: 423 * <pre>{@code 424 * <queries> 425 * <intent> 426 * <action 427 * android:name="android.speech.RecognitionService" /> 428 * </intent> 429 * </queries> 430 * }</pre> 431 * 432 * @param context in which to create {@code SpeechRecognizer} 433 * @return a new {@code SpeechRecognizer} 434 */ 435 @MainThread createSpeechRecognizer(final Context context)436 public static SpeechRecognizer createSpeechRecognizer(final Context context) { 437 return createSpeechRecognizer(context, null); 438 } 439 440 /** 441 * Factory method to create a new {@code SpeechRecognizer}. Please note that 442 * {@link #setRecognitionListener(RecognitionListener)} should be called before dispatching any 443 * command to the created {@code SpeechRecognizer}, otherwise no notifications will be 444 * received. 445 * Use this version of the method to specify a specific service to direct this 446 * {@link SpeechRecognizer} to. 447 * 448 * <p><strong>Important</strong>: before calling this method, please check via 449 * {@link android.content.pm.PackageManager#queryIntentServices(Intent, int)} that {@code 450 * serviceComponent} actually exists and provides 451 * {@link RecognitionService#SERVICE_INTERFACE}. Normally you would not use this; call 452 * {@link #createSpeechRecognizer(Context)} to use the system default recognition 453 * service instead or {@link #createOnDeviceSpeechRecognizer(Context)} to use on-device 454 * recognition.</p> 455 * 456 * <p>For apps targeting Android 11 (API level 30) interaction with a speech recognition 457 * service requires <queries> element to be added to the manifest file: 458 * <pre>{@code 459 * <queries> 460 * <intent> 461 * <action 462 * android:name="android.speech.RecognitionService" /> 463 * </intent> 464 * </queries> 465 * }</pre> 466 * 467 * @param context in which to create {@code SpeechRecognizer} 468 * @param serviceComponent the {@link ComponentName} of a specific service to direct this 469 * {@code SpeechRecognizer} to 470 * @return a new {@code SpeechRecognizer} 471 */ 472 @MainThread createSpeechRecognizer(final Context context, final ComponentName serviceComponent)473 public static SpeechRecognizer createSpeechRecognizer(final Context context, 474 final ComponentName serviceComponent) { 475 if (context == null) { 476 throw new IllegalArgumentException("Context cannot be null"); 477 } 478 checkIsCalledFromMainThread(); 479 return new SpeechRecognizer(context, serviceComponent); 480 } 481 482 /** 483 * Factory method to create a new {@code SpeechRecognizer}. 
484 * 485 * <p>Please note that {@link #setRecognitionListener(RecognitionListener)} should be called 486 * before dispatching any command to the created {@code SpeechRecognizer}, otherwise no 487 * notifications will be received. 488 * 489 * @param context in which to create {@code SpeechRecognizer} 490 * @return a new on-device {@code SpeechRecognizer}. 491 * @throws UnsupportedOperationException iff {@link #isOnDeviceRecognitionAvailable(Context)} 492 * is false 493 */ 494 @NonNull 495 @MainThread createOnDeviceSpeechRecognizer(@onNull final Context context)496 public static SpeechRecognizer createOnDeviceSpeechRecognizer(@NonNull final Context context) { 497 if (!isOnDeviceRecognitionAvailable(context)) { 498 throw new UnsupportedOperationException("On-device recognition is not available"); 499 } 500 return lenientlyCreateOnDeviceSpeechRecognizer(context); 501 } 502 503 /** 504 * Helper method to create on-device SpeechRecognizer in tests even when the device does not 505 * support on-device speech recognition. 506 * 507 * @hide 508 */ 509 @TestApi 510 @NonNull 511 @MainThread createOnDeviceTestingSpeechRecognizer( @onNull final Context context)512 public static SpeechRecognizer createOnDeviceTestingSpeechRecognizer( 513 @NonNull final Context context) { 514 return lenientlyCreateOnDeviceSpeechRecognizer(context); 515 } 516 517 @NonNull 518 @MainThread lenientlyCreateOnDeviceSpeechRecognizer( @onNull final Context context)519 private static SpeechRecognizer lenientlyCreateOnDeviceSpeechRecognizer( 520 @NonNull final Context context) { 521 if (context == null) { 522 throw new IllegalArgumentException("Context cannot be null"); 523 } 524 checkIsCalledFromMainThread(); 525 return new SpeechRecognizer(context, /* onDevice */ true); 526 } 527 528 /** 529 * Sets the listener that will receive all the callbacks. The previous unfinished commands will 530 * be executed with the old listener, while any following command will be executed with the new 531 * listener. 532 * 533 * @param listener listener that will receive all the callbacks from the created 534 * {@link SpeechRecognizer}, this must not be null. 535 */ 536 @MainThread setRecognitionListener(RecognitionListener listener)537 public void setRecognitionListener(RecognitionListener listener) { 538 checkIsCalledFromMainThread(); 539 putMessage(Message.obtain(mHandler, MSG_CHANGE_LISTENER, listener)); 540 } 541 542 /** 543 * Starts listening for speech. Please note that 544 * {@link #setRecognitionListener(RecognitionListener)} should be called beforehand, otherwise 545 * no notifications will be received. 546 * 547 * @param recognizerIntent contains parameters for the recognition to be performed. The intent 548 * may also contain optional extras, see {@link RecognizerIntent}. If these values are 549 * not set explicitly, default values will be used by the recognizer. 550 */ 551 @MainThread startListening(final Intent recognizerIntent)552 public void startListening(final Intent recognizerIntent) { 553 if (recognizerIntent == null) { 554 throw new IllegalArgumentException("intent must not be null"); 555 } 556 checkIsCalledFromMainThread(); 557 558 if (DBG) { 559 Slog.i(TAG, "#startListening called"); 560 if (mService == null) { 561 Slog.i(TAG, "Connection is not established yet"); 562 } 563 } 564 565 if (mService == null) { 566 // First time connection: first establish a connection, then dispatch #startListening. 
    /**
     * Stops listening for speech. Speech captured so far will be recognized as if the user had
     * stopped speaking at this point.
     *
     * <p>Note that in the default case, this does not need to be called, as the speech endpointer
     * will automatically stop the recognizer listening when it determines speech has completed.
     * However, you can manipulate endpointer parameters directly using the intent extras defined in
     * {@link RecognizerIntent}, in which case you may sometimes want to manually call this method
     * to stop listening sooner.
     *
     * <p>Upon invocation clients must wait until {@link RecognitionListener#onResults} or
     * {@link RecognitionListener#onError} are invoked before calling
     * {@link SpeechRecognizer#startListening} again. Otherwise such an attempt would be rejected by
     * the recognition service.
     *
     * <p>Please note that
     * {@link #setRecognitionListener(RecognitionListener)} should be called beforehand, otherwise
     * no notifications will be received.
     */
    @MainThread
    public void stopListening() {
        checkIsCalledFromMainThread();

        if (DBG) {
            Slog.i(TAG, "#stopListening called");
            if (mService == null) {
                Slog.i(TAG, "Connection is not established yet");
            }
        }

        putMessage(Message.obtain(mHandler, MSG_STOP));
    }

    /**
     * Cancels the speech recognition. Please note that
     * {@link #setRecognitionListener(RecognitionListener)} should be called beforehand, otherwise
     * no notifications will be received.
     */
    @MainThread
    public void cancel() {
        checkIsCalledFromMainThread();
        putMessage(Message.obtain(mHandler, MSG_CANCEL));
    }

    /**
     * Checks whether {@code recognizerIntent} is supported by
     * {@link SpeechRecognizer#startListening(Intent)}.
     *
     * @param recognizerIntent contains parameters for the recognition to be performed. The intent
     *        may also contain optional extras. See {@link RecognizerIntent} for the list of
     *        supported extras, any unlisted extra might be ignored.
     * @param executor for dispatching listener callbacks
     * @param supportListener the listener on which to receive the support query results.
     */
    public void checkRecognitionSupport(
            @NonNull Intent recognizerIntent,
            @NonNull @CallbackExecutor Executor executor,
            @NonNull RecognitionSupportCallback supportListener) {
        Objects.requireNonNull(recognizerIntent, "intent must not be null");
        Objects.requireNonNull(supportListener, "listener must not be null");

        if (DBG) {
            Slog.i(TAG, "#checkRecognitionSupport called");
            if (mService == null) {
                Slog.i(TAG, "Connection is not established yet");
            }
        }

        if (mService == null) {
            // First time connection: first establish a connection, then dispatch.
            connectToSystemService();
        }
        putMessage(Message.obtain(mHandler, MSG_CHECK_RECOGNITION_SUPPORT,
                new CheckRecognitionSupportArgs(recognizerIntent, executor, supportListener)));
    }

    /**
     * Attempts to download the support for the given {@code recognizerIntent}. This might trigger
     * user interaction to approve the download. Callers can verify the status of the request via
     * {@link #checkRecognitionSupport(Intent, Executor, RecognitionSupportCallback)}.
     *
     * @param recognizerIntent contains parameters for the recognition to be performed. The intent
     *        may also contain optional extras, see {@link RecognizerIntent}.
     */
    public void triggerModelDownload(@NonNull Intent recognizerIntent) {
        Objects.requireNonNull(recognizerIntent, "intent must not be null");
        if (DBG) {
            Slog.i(TAG, "#triggerModelDownload without a listener called");
            if (mService == null) {
                Slog.i(TAG, "Connection is not established yet");
            }
        }
        if (mService == null) {
            // First time connection: first establish a connection, then dispatch.
            connectToSystemService();
        }
        putMessage(Message.obtain(
                mHandler, MSG_TRIGGER_MODEL_DOWNLOAD,
                new ModelDownloadListenerArgs(recognizerIntent, null, null)));
    }

    /**
     * Attempts to download the support for the given {@code recognizerIntent}. This might trigger
     * user interaction to approve the download. Callers can verify the status of the request via
     * {@link #checkRecognitionSupport(Intent, Executor, RecognitionSupportCallback)}.
     *
     * <p> The updates about the model download request are received via the given
     * {@link ModelDownloadListener}:
     *
     * <li> If the model is already available, {@link ModelDownloadListener#onSuccess()} will be
     * called directly. The model can be safely used afterwards.
     *
     * <li> If the {@link RecognitionService} has started the download,
     * {@link ModelDownloadListener#onProgress(int)} will be called an unspecified (zero or more)
     * number of times until the download is complete.
     * When the download finishes, {@link ModelDownloadListener#onSuccess()} will be called.
     * The model can be safely used afterwards.
     *
     * <li> If the {@link RecognitionService} has only scheduled the download, but won't satisfy it
     * immediately, {@link ModelDownloadListener#onScheduled()} will be called.
     * There will be no further updates on this listener.
     *
     * <li> If the request fails at any time due to a network or scheduling error,
     * {@link ModelDownloadListener#onError(int)} will be called.
     *
     * @param recognizerIntent contains parameters for the recognition to be performed. The intent
     *        may also contain optional extras, see {@link RecognizerIntent}.
     * @param executor for dispatching listener callbacks
     * @param listener on which to receive updates about the model download request.
     */
    public void triggerModelDownload(
            @NonNull Intent recognizerIntent,
            @NonNull @CallbackExecutor Executor executor,
            @NonNull ModelDownloadListener listener) {
        Objects.requireNonNull(recognizerIntent, "intent must not be null");
        if (DBG) {
            Slog.i(TAG, "#triggerModelDownload with a listener called");
            if (mService == null) {
                Slog.i(TAG, "Connection is not established yet");
            }
        }
        if (mService == null) {
            // First time connection: first establish a connection, then dispatch.
            connectToSystemService();
        }
        putMessage(Message.obtain(
                mHandler, MSG_TRIGGER_MODEL_DOWNLOAD,
                new ModelDownloadListenerArgs(recognizerIntent, executor, listener)));
    }

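    // Illustrative sketch (not part of this class): checking on-device support for a locale and
    // triggering a model download when the language is not yet installed. `recognizer`,
    // `mainExecutor` (typically context.getMainExecutor()) and the ModelDownloadListener `listener`
    // are assumptions of this sketch, and the download policy shown is hypothetical.
    //
    //     Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    //     intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en-US");
    //     recognizer.checkRecognitionSupport(intent, mainExecutor,
    //             new RecognitionSupportCallback() {
    //                 @Override
    //                 public void onSupportResult(RecognitionSupport support) {
    //                     if (support.getInstalledOnDeviceLanguages().isEmpty()) {
    //                         recognizer.triggerModelDownload(intent, mainExecutor, listener);
    //                     }
    //                 }
    //
    //                 @Override
    //                 public void onError(int error) { /* e.g. ERROR_CANNOT_CHECK_SUPPORT */ }
    //             });
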
    /**
     * Sets a temporary component to power on-device speech recognizer.
     *
     * <p>This is only expected to be called in tests; the system would reject calls from client
     * apps.
     *
     * @param componentName name of the component that will temporarily replace the speech
     *        recognizer. {@code null} value resets the recognizer to default.
     *
     * @hide
     */
    @TestApi
    @RequiresPermission(Manifest.permission.MANAGE_SPEECH_RECOGNITION)
    public void setTemporaryOnDeviceRecognizer(@Nullable ComponentName componentName) {
        mHandler.sendMessage(
                Message.obtain(mHandler, MSG_SET_TEMPORARY_ON_DEVICE_COMPONENT, componentName));
    }

    private static void checkIsCalledFromMainThread() {
        if (Looper.myLooper() != Looper.getMainLooper()) {
            throw new RuntimeException(
                    "SpeechRecognizer should be used only from the application's main thread");
        }
    }

    private void putMessage(Message msg) {
        if (mService == null) {
            mPendingTasks.offer(msg);
        } else {
            mHandler.sendMessage(msg);
        }
    }

    /** sends the actual message to the service */
    private void handleStartListening(Intent recognizerIntent) {
        if (!checkOpenConnection()) {
            return;
        }
        try {
            mService.startListening(recognizerIntent, mListener, mContext.getAttributionSource());
            if (DBG) Log.d(TAG, "service start listening command succeeded");
        } catch (final RemoteException e) {
            Log.e(TAG, "startListening() failed", e);
            mListener.onError(ERROR_CLIENT);
        }
    }

    /** sends the actual message to the service */
    private void handleStopMessage() {
        if (!checkOpenConnection()) {
            return;
        }
        try {
            mService.stopListening(mListener);
            if (DBG) Log.d(TAG, "service stop listening command succeeded");
        } catch (final RemoteException e) {
            Log.e(TAG, "stopListening() failed", e);
            mListener.onError(ERROR_CLIENT);
        }
    }

    /** sends the actual message to the service */
    private void handleCancelMessage() {
        if (!checkOpenConnection()) {
            return;
        }
        try {
            mService.cancel(mListener, /*isShutdown*/ false);
            if (DBG) Log.d(TAG, "service cancel command succeeded");
        } catch (final RemoteException e) {
            Log.e(TAG, "cancel() failed", e);
            mListener.onError(ERROR_CLIENT);
        }
    }

    private void handleSetTemporaryComponent(ComponentName componentName) {
        if (DBG) {
            Log.d(TAG, "handleSetTemporaryComponent, componentName=" + componentName);
        }

        if (!maybeInitializeManagerService()) {
            return;
        }

        try {
            mManagerService.setTemporaryComponent(componentName);
        } catch (final RemoteException e) {
            e.rethrowFromSystemServer();
        }
    }

    private void handleCheckRecognitionSupport(
            Intent recognizerIntent,
            Executor callbackExecutor,
            RecognitionSupportCallback recognitionSupportCallback) {
        if (!maybeInitializeManagerService() || !checkOpenConnection()) {
            return;
        }
        try {
            mService.checkRecognitionSupport(
                    recognizerIntent,
                    mContext.getAttributionSource(),
                    new InternalSupportCallback(callbackExecutor, recognitionSupportCallback));
            if (DBG) Log.d(TAG, "service support command succeeded");
        } catch (final RemoteException e) {
            Log.e(TAG, "checkRecognitionSupport() failed", e);
            callbackExecutor.execute(() -> recognitionSupportCallback.onError(ERROR_CLIENT));
        }
    }

    private void handleTriggerModelDownload(
            Intent recognizerIntent,
            @Nullable Executor callbackExecutor,
            @Nullable ModelDownloadListener modelDownloadListener) {
        if (!maybeInitializeManagerService() || !checkOpenConnection()) {
            return;
        }

        // Trigger model download without a listener.
        if (modelDownloadListener == null) {
            try {
                mService.triggerModelDownload(
                        recognizerIntent, mContext.getAttributionSource(), null);
                if (DBG) Log.d(TAG, "triggerModelDownload() without a listener");
            } catch (final RemoteException e) {
                Log.e(TAG, "triggerModelDownload() without a listener failed", e);
                mListener.onError(ERROR_CLIENT);
            }
        }
        // Trigger model download with a listener.
        else {
            try {
                mService.triggerModelDownload(
                        recognizerIntent, mContext.getAttributionSource(),
                        new InternalModelDownloadListener(callbackExecutor, modelDownloadListener));
                if (DBG) Log.d(TAG, "triggerModelDownload() with a listener");
            } catch (final RemoteException e) {
                Log.e(TAG, "triggerModelDownload() with a listener failed", e);
                callbackExecutor.execute(() -> modelDownloadListener.onError(ERROR_CLIENT));
            }
        }
    }

    private boolean checkOpenConnection() {
        if (mService != null) {
            return true;
        }
        mListener.onError(ERROR_CLIENT);
        Log.e(TAG, "not connected to the recognition service");
        return false;
    }

    /** changes the listener */
    private void handleChangeListener(RecognitionListener listener) {
        if (DBG) Log.d(TAG, "handleChangeListener, listener=" + listener);
        mListener.mInternalListener = listener;
    }

    /** Destroys the {@code SpeechRecognizer} object. */
    public void destroy() {
        if (mService != null) {
            try {
                mService.cancel(mListener, /*isShutdown*/ true);
            } catch (final RemoteException e) {
                // Not important
            }
        }

        mService = null;
        mPendingTasks.clear();
        mListener.mInternalListener = null;
    }

    /** Establishes a connection to system server proxy and initializes the session. */
    private void connectToSystemService() {
        if (!maybeInitializeManagerService()) {
            return;
        }

        ComponentName componentName = getSpeechRecognizerComponentName();

        if (!mOnDevice && componentName == null) {
            mListener.onError(ERROR_CLIENT);
            return;
        }

        try {
            mManagerService.createSession(
                    componentName,
                    mClientToken,
                    mOnDevice,
                    new IRecognitionServiceManagerCallback.Stub(){
                        @Override
                        public void onSuccess(IRecognitionService service) throws RemoteException {
                            if (DBG) {
                                Log.i(TAG, "Connected to speech recognition service");
                            }
                            mService = service;
                            while (!mPendingTasks.isEmpty()) {
                                mHandler.sendMessage(mPendingTasks.poll());
                            }
                        }

                        @Override
                        public void onError(int errorCode) throws RemoteException {
                            Log.e(TAG, "Bind to system recognition service failed with error "
                                    + errorCode);
                            mListener.onError(errorCode);
                        }
                    });
        } catch (RemoteException e) {
            e.rethrowFromSystemServer();
        }
    }

    private synchronized boolean maybeInitializeManagerService() {
        if (DBG) {
            Log.i(TAG, "#maybeInitializeManagerService found = " + mManagerService);
        }
        if (mManagerService != null) {
            return true;
        }

        IBinder service = ServiceManager.getService(Context.SPEECH_RECOGNITION_SERVICE);
        if (service == null && mOnDevice) {
            service = (IBinder) mContext.getSystemService(Context.SPEECH_RECOGNITION_SERVICE);
        }
        mManagerService = IRecognitionServiceManager.Stub.asInterface(service);

        if (mManagerService == null) {
            if (mListener != null) {
                mListener.onError(ERROR_CLIENT);
            }
            return false;
        }
        return true;
    }

    /**
     * Returns the component name to be used for establishing a connection, based on the parameters
     * used during initialization.
     *
     * <p>Note the 3 different scenarios:
     * <ol>
     *     <li>On-device speech recognizer which is determined by the manufacturer and not
     *     changeable by the user
     *     <li>Default user-selected speech recognizer as specified by
     *     {@code Settings.Secure.VOICE_RECOGNITION_SERVICE}
     *     <li>Custom speech recognizer supplied by the client.
     */
    private ComponentName getSpeechRecognizerComponentName() {
        if (mOnDevice) {
            return null;
        }

        if (mServiceComponent != null) {
            return mServiceComponent;
        }

        String serviceComponent = Settings.Secure.getString(mContext.getContentResolver(),
                Settings.Secure.VOICE_RECOGNITION_SERVICE);

        if (TextUtils.isEmpty(serviceComponent)) {
            Log.e(TAG, "no selected voice recognition service");
            mListener.onError(ERROR_CLIENT);
            return null;
        }

        return ComponentName.unflattenFromString(serviceComponent);
    }

    private static class CheckRecognitionSupportArgs {
        final Intent mIntent;
        final Executor mCallbackExecutor;
        final RecognitionSupportCallback mCallback;

        private CheckRecognitionSupportArgs(
                Intent intent,
                Executor callbackExecutor,
                RecognitionSupportCallback callback) {
            mIntent = intent;
            mCallbackExecutor = callbackExecutor;
            mCallback = callback;
        }
    }

    private static class ModelDownloadListenerArgs {
        final Intent mIntent;
        final Executor mExecutor;
        final ModelDownloadListener mModelDownloadListener;

        private ModelDownloadListenerArgs(Intent intent, Executor executor,
                ModelDownloadListener modelDownloadListener) {
            mIntent = intent;
            mExecutor = executor;
            mModelDownloadListener = modelDownloadListener;
        }
    }

    /**
     * Internal wrapper of IRecognitionListener which will propagate the results to
     * RecognitionListener
     */
    private static class InternalRecognitionListener extends IRecognitionListener.Stub {
        private RecognitionListener mInternalListener;

        private static final int MSG_BEGINNING_OF_SPEECH = 1;
        private static final int MSG_BUFFER_RECEIVED = 2;
        private static final int MSG_END_OF_SPEECH = 3;
        private static final int MSG_ERROR = 4;
        private static final int MSG_READY_FOR_SPEECH = 5;
        private static final int MSG_RESULTS = 6;
        private static final int MSG_PARTIAL_RESULTS = 7;
        private static final int MSG_RMS_CHANGED = 8;
        private static final int MSG_ON_EVENT = 9;
        private static final int MSG_SEGMENT_RESULTS = 10;
        private static final int MSG_SEGMENT_END_SESSION = 11;
        private static final int MSG_LANGUAGE_DETECTION = 12;

        private final Handler mInternalHandler = new Handler(Looper.getMainLooper()) {
            @Override
            public void handleMessage(Message msg) {
                if (mInternalListener == null) {
                    return;
                }
                switch (msg.what) {
                    case MSG_BEGINNING_OF_SPEECH:
                        mInternalListener.onBeginningOfSpeech();
                        break;
                    case MSG_BUFFER_RECEIVED:
                        mInternalListener.onBufferReceived((byte[]) msg.obj);
                        break;
                    case MSG_END_OF_SPEECH:
                        mInternalListener.onEndOfSpeech();
                        break;
                    case MSG_ERROR:
                        mInternalListener.onError((Integer) msg.obj);
                        break;
                    case MSG_READY_FOR_SPEECH:
                        mInternalListener.onReadyForSpeech((Bundle) msg.obj);
                        break;
                    case MSG_RESULTS:
                        mInternalListener.onResults((Bundle) msg.obj);
                        break;
                    case MSG_PARTIAL_RESULTS:
                        mInternalListener.onPartialResults((Bundle) msg.obj);
                        break;
                    case MSG_RMS_CHANGED:
                        mInternalListener.onRmsChanged((Float) msg.obj);
                        break;
                    case MSG_ON_EVENT:
                        mInternalListener.onEvent(msg.arg1, (Bundle) msg.obj);
                        break;
                    case MSG_SEGMENT_RESULTS:
                        mInternalListener.onSegmentResults((Bundle) msg.obj);
                        break;
                    case MSG_SEGMENT_END_SESSION:
                        mInternalListener.onEndOfSegmentedSession();
                        break;
                    case MSG_LANGUAGE_DETECTION:
                        mInternalListener.onLanguageDetection((Bundle) msg.obj);
                        break;
                }
            }
        };

        public void onBeginningOfSpeech() {
            Message.obtain(mInternalHandler, MSG_BEGINNING_OF_SPEECH).sendToTarget();
        }

        public void onBufferReceived(final byte[] buffer) {
            Message.obtain(mInternalHandler, MSG_BUFFER_RECEIVED, buffer).sendToTarget();
        }

        public void onEndOfSpeech() {
            Message.obtain(mInternalHandler, MSG_END_OF_SPEECH).sendToTarget();
        }

        public void onError(final int error) {
            Message.obtain(mInternalHandler, MSG_ERROR, error).sendToTarget();
        }

        public void onReadyForSpeech(final Bundle noiseParams) {
            Message.obtain(mInternalHandler, MSG_READY_FOR_SPEECH, noiseParams).sendToTarget();
        }

        public void onResults(final Bundle results) {
            Message.obtain(mInternalHandler, MSG_RESULTS, results).sendToTarget();
        }

        public void onPartialResults(final Bundle results) {
            Message.obtain(mInternalHandler, MSG_PARTIAL_RESULTS, results).sendToTarget();
        }

        public void onRmsChanged(final float rmsdB) {
            Message.obtain(mInternalHandler, MSG_RMS_CHANGED, rmsdB).sendToTarget();
        }

        public void onSegmentResults(final Bundle bundle) {
            Message.obtain(mInternalHandler, MSG_SEGMENT_RESULTS, bundle).sendToTarget();
        }

        public void onEndOfSegmentedSession() {
            Message.obtain(mInternalHandler, MSG_SEGMENT_END_SESSION).sendToTarget();
        }

        public void onLanguageDetection(final Bundle results) {
            Message.obtain(mInternalHandler, MSG_LANGUAGE_DETECTION, results).sendToTarget();
        }

        public void onEvent(final int eventType, final Bundle params) {
            Message.obtain(mInternalHandler, MSG_ON_EVENT, eventType, eventType, params)
                    .sendToTarget();
        }
    }

    private static class InternalSupportCallback extends IRecognitionSupportCallback.Stub {
        private final Executor mExecutor;
        private final RecognitionSupportCallback mCallback;

        private InternalSupportCallback(Executor executor, RecognitionSupportCallback callback) {
            this.mExecutor = executor;
            this.mCallback = callback;
        }

        @Override
        public void onSupportResult(RecognitionSupport recognitionSupport) throws RemoteException {
            mExecutor.execute(() -> mCallback.onSupportResult(recognitionSupport));
        }

        @Override
        public void onError(int errorCode) throws RemoteException {
            mExecutor.execute(() -> mCallback.onError(errorCode));
        }
    }

    private static class InternalModelDownloadListener extends IModelDownloadListener.Stub {
        private final Executor mExecutor;
        private final ModelDownloadListener mModelDownloadListener;

        private InternalModelDownloadListener(
                Executor executor,
                @NonNull ModelDownloadListener modelDownloadListener) {
            mExecutor = executor;
            mModelDownloadListener = modelDownloadListener;
        }

        @Override
        public void onProgress(int completedPercent) throws RemoteException {
            mExecutor.execute(() -> mModelDownloadListener.onProgress(completedPercent));
        }

        @Override
        public void onSuccess() throws RemoteException {
            mExecutor.execute(() -> mModelDownloadListener.onSuccess());
        }

        @Override
        public void onScheduled() throws RemoteException {
            mExecutor.execute(() -> mModelDownloadListener.onScheduled());
        }

        @Override
        public void onError(int error) throws RemoteException {
            mExecutor.execute(() -> mModelDownloadListener.onError(error));
        }
    }
}