1// Copyright 2017 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5'use strict';
6
/**
 * Returns the language used for Chrome's UI. Represents Chrome's UI
 * language, which might not coincide with the user's "preferred" language
 * in the Settings. For more details, see:
 * - https://developer.mozilla.org/en/docs/Web/API/NavigatorLanguage/language
 * - https://developer.mozilla.org/en/docs/Web/API/NavigatorLanguage/languages
 *
 * The result is a language version string as defined in
 * <a href="http://www.ietf.org/rfc/bcp/bcp47.txt">BCP 47</a>,
 * e.g. "en", "en-US" or "cs-CZ".
 */
function getChromeUILanguage() {
  // Note: in Chrome, |window.navigator.language| is not guaranteed to be
  // equal to |window.navigator.languages[0]|.
  const {language} = window.navigator;
  return language;
}
23
/**
 * The different types of user action and error events that are logged
 * from Voice Search. This enum is used to transfer information to
 * the renderer and is not used as a UMA enum histogram's logged value.
 * Note: Keep in sync with common/ntp_logging_events.h
 * @enum {!number}
 * @const
 */
const LOG_TYPE = {
  // Activated by clicking on the fakebox icon.
  ACTION_ACTIVATE_FAKEBOX: 13,
  // Activated by keyboard shortcut.
  ACTION_ACTIVATE_KEYBOARD: 14,
  // Close the voice overlay by a user's explicit action.
  ACTION_CLOSE_OVERLAY: 15,
  // Submitted voice query.
  ACTION_QUERY_SUBMITTED: 16,
  // Clicked on support link in error message.
  ACTION_SUPPORT_LINK_CLICKED: 17,
  // Retried by clicking Try Again link.
  ACTION_TRY_AGAIN_LINK: 18,
  // Retried by clicking microphone button.
  // Fixed: this was 10, which did not follow ACTION_TRY_AGAIN_LINK (18) and
  // broke the "keep in sync with common/ntp_logging_events.h" invariant,
  // where the error events below occupy 20-29.
  ACTION_TRY_AGAIN_MIC_BUTTON: 19,
  // Errors received from the Speech Recognition API.
  ERROR_NO_SPEECH: 20,
  ERROR_ABORTED: 21,
  ERROR_AUDIO_CAPTURE: 22,
  ERROR_NETWORK: 23,
  ERROR_NOT_ALLOWED: 24,
  ERROR_SERVICE_NOT_ALLOWED: 25,
  ERROR_BAD_GRAMMAR: 26,
  ERROR_LANGUAGE_NOT_SUPPORTED: 27,
  ERROR_NO_MATCH: 28,
  ERROR_OTHER: 29
};
59
/**
 * Enum for keyboard event codes. Values are KeyboardEvent |code| strings
 * (physical-key identifiers), compared against |event.code| in the key
 * handlers below.
 * @enum {!string}
 * @const
 */
const KEYCODE = {
  ENTER: 'Enter',
  ESC: 'Escape',
  NUMPAD_ENTER: 'NumpadEnter',
  PERIOD: 'Period',
  SPACE: 'Space',
  TAB: 'Tab'
};
73
/**
 * The set of possible recognition errors. Produced from the Web Speech API
 * error strings by |speech.getRecognitionError_| and mapped to |LOG_TYPE|
 * error constants (20-29) by |speech.errorToLogType_|.
 * Note: NO_SPEECH is 0, so values must not be tested for truthiness.
 * @enum {!number}
 * @const
 */
const RecognitionError = {
  NO_SPEECH: 0,
  ABORTED: 1,
  AUDIO_CAPTURE: 2,
  NETWORK: 3,
  NOT_ALLOWED: 4,
  SERVICE_NOT_ALLOWED: 5,
  BAD_GRAMMAR: 6,
  LANGUAGE_NOT_SUPPORTED: 7,
  NO_MATCH: 8,
  OTHER: 9
};
91
/**
 * Provides methods for communicating with the <a
 * href="https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API">
 * Web Speech API</a>, error handling and executing search queries.
 */
const speech = {};

/**
 * DOM id of the element used by |speech.screenReaderAnnounce_| to surface
 * messages to screen readers.
 * @private {string}
 * @const
 */
speech.SCREEN_READER_ANNOUNCER_ = 'screen-reader-announcer';
100
/**
 * Localized translations for messages used in the Speech UI. All entries
 * start out empty and are populated with real translations in
 * |speech.init()|.
 * @type {{
 *   audioError: string,
 *   details: string,
 *   languageError: string,
 *   learnMore: string,
 *   listening: string,
 *   networkError: string,
 *   noTranslation: string,
 *   noVoice: string,
 *   otherError: string,
 *   permissionError: string,
 *   ready: string,
 *   tryAgain: string,
 *   waiting: string
 * }}
 */
speech.messages = {};
[
  'audioError', 'details', 'languageError', 'learnMore', 'listening',
  'networkError', 'noTranslation', 'noVoice', 'otherError',
  'permissionError', 'ready', 'tryAgain', 'waiting'
].forEach(function(key) {
  speech.messages[key] = '';
});
134
/**
 * The set of controller states. See |speech.currentState_| for the current
 * value; transitions are driven by the recognition event handlers and by
 * |speech.start()| / |speech.stop()| / |speech.reset_()|.
 * @enum {number}
 * @private
 */
speech.State_ = {
  // Initial state of the controller. It is never re-entered.
  // The only state from which the |speech.init()| method can be called.
  // The UI overlay is hidden, recognition is inactive.
  UNINITIALIZED: -1,
  // Represents a ready to be activated state. If voice search is unsuccessful
  // for any reason, the controller will return to this state
  // using |speech.reset_()|. The UI overlay is hidden, recognition is inactive.
  READY: 0,
  // Indicates that speech recognition has started, but no audio has yet
  // been captured. The UI overlay is visible, recognition is active.
  STARTED: 1,
  // Indicates that audio is being captured by the Web Speech API, but no
  // speech has yet been recognized. The UI overlay is visible and indicating
  // that audio is being captured, recognition is active.
  AUDIO_RECEIVED: 2,
  // Represents a state where speech has been recognized by the Web Speech API,
  // but no resulting transcripts have yet been received back. The UI overlay is
  // visible and indicating that audio is being captured, recognition is active.
  SPEECH_RECEIVED: 3,
  // Controller state where speech has been successfully recognized and text
  // transcripts have been reported back. The UI overlay is visible
  // and displaying intermediate results, recognition is active.
  // This state remains until recognition ends successfully or due to an error.
  RESULT_RECEIVED: 4,
  // Indicates that speech recognition has failed due to an error
  // (or a no match error) being received from the Web Speech API.
  // A timeout may have occurred as well. The UI overlay is visible
  // and displaying an error message, recognition is inactive.
  ERROR_RECEIVED: 5,
  // Represents a state where speech recognition has been stopped
  // (either on success or failure) and the UI has not yet reset/redirected.
  // The UI overlay is displaying results or an error message with a timeout,
  // after which the site will either get redirected to search results
  // (successful) or back to the NTP by hiding the overlay (unsuccessful).
  STOPPED: 6
};
177
/**
 * Threshold for considering an interim speech transcript result as "confident
 * enough". The more confident the API is about a transcript, the higher the
 * confidence (number between 0 and 1).
 * @private {number}
 * @const
 */
speech.RECOGNITION_CONFIDENCE_THRESHOLD_ = 0.5;

/**
 * Time in milliseconds to wait before closing the UI after an error has
 * occurred. This is a short timeout used when no click-target is present.
 * @private {number}
 * @const
 */
speech.ERROR_TIMEOUT_SHORT_MS_ = 3000;

/**
 * Time in milliseconds to wait before closing the UI after an error has
 * occurred. This is a longer timeout used when a click-target is present.
 * @private {number}
 * @const
 */
speech.ERROR_TIMEOUT_LONG_MS_ = 8000;

/**
 * Time in milliseconds to wait before closing the UI if no interaction has
 * occurred.
 * @private {number}
 * @const
 */
speech.IDLE_TIMEOUT_MS_ = 8000;

/**
 * Maximum number of characters recognized before force-submitting a query.
 * Includes characters of non-confident recognition transcripts.
 * @private {number}
 * @const
 */
speech.QUERY_LENGTH_LIMIT_ = 120;
219
/**
 * Specifies the current state of the controller.
 * Note: Different than the UI state.
 * @private {speech.State_}
 */
speech.currentState_ = speech.State_.UNINITIALIZED;

/**
 * The ID for the error timer. Undefined until the first timeout is
 * scheduled by |speech.resetErrorTimer_()|.
 * @private {number}
 */
speech.errorTimer_;

/**
 * The duration of the timeout for the UI elements during an error state.
 * Depending on the error state, we have different durations for the timeout.
 * @private {number}
 */
speech.errorTimeoutMs_ = 0;

/**
 * The last high confidence voice transcript received from the Web Speech API.
 * This is the actual query that could potentially be submitted to Search.
 * Cleared by |speech.reset_()|.
 * @private {string}
 */
speech.finalResult_;

/**
 * Base URL for sending queries to Search. Includes trailing forward slash.
 * Set once in |speech.init()|.
 * @private {string}
 */
speech.googleBaseUrl_;

/**
 * The ID for the idle timer. Undefined until the first timeout is
 * scheduled by |speech.resetIdleTimer_()|.
 * @private {number}
 */
speech.idleTimer_;

/**
 * The last low confidence voice transcript received from the Web Speech API.
 * Cleared by |speech.reset_()|.
 * @private {string}
 */
speech.interimResult_;

/**
 * The Web Speech API object driving the speech recognition transaction.
 * Created (and re-created on hard reset) by |speech.initWebkitSpeech_()|.
 * @private {!webkitSpeechRecognition}
 */
speech.recognition_;

/**
 * Indicates if the user is using keyboard navigation (i.e. tab).
 * @private {boolean}
 */
speech.usingKeyboardNavigation_ = false;
276
/**
 * Log an event from Voice Search through the embedded search NTP API.
 * @param {number} eventType Event from |LOG_TYPE|.
 */
speech.logEvent = function(eventType) {
  const ntpApiHandle = window.chrome.embeddedSearch.newTabPage;
  ntpApiHandle.logEvent(eventType);
};
284
/**
 * Initialize the speech module as part of the local NTP. Adds event handlers
 * and shows the fakebox microphone icon. May only be called once, while the
 * controller is still in the UNINITIALIZED state.
 * @param {string} googleBaseUrl Base URL for sending queries to Search.
 * @param {!Object} translatedStrings Dictionary of localized string messages.
 * @param {?Element} fakeboxMicrophoneElem Fakebox microphone icon element.
 * @param {!Object} searchboxApiHandle SearchBox API handle.
 * @throws {Error} If the microphone element is missing, the module has
 *     already been initialized, or the searchbox already has an
 *     |onfocuschange| handler.
 */
speech.init = function(
    googleBaseUrl, translatedStrings, fakeboxMicrophoneElem,
    searchboxApiHandle) {
  if (!fakeboxMicrophoneElem) {
    throw new Error('Speech button element not found.');
  }

  if (speech.currentState_ != speech.State_.UNINITIALIZED) {
    throw new Error(
        'Trying to re-initialize speech when not in UNINITIALIZED state.');
  }

  // Initialize event handlers.
  fakeboxMicrophoneElem.hidden = false;
  fakeboxMicrophoneElem.title = translatedStrings.fakeboxMicrophoneTooltip;
  fakeboxMicrophoneElem.onclick = function(event) {
    // If propagated, closes the overlay (click on the background).
    event.stopPropagation();
    speech.logEvent(LOG_TYPE.ACTION_ACTIVATE_FAKEBOX);
    speech.start();
  };
  fakeboxMicrophoneElem.onkeydown = function(event) {
    // Only start on an initial (non-repeated) <SPACE>/<ENTER> press, and
    // only from the READY state (i.e. not while recognition is running).
    if (!event.repeat && speech.isSpaceOrEnter_(event.code) &&
        speech.currentState_ == speech.State_.READY) {
      event.stopPropagation();
      speech.start();
    }
  };
  window.addEventListener('keydown', speech.onKeyDown);
  if (searchboxApiHandle.onfocuschange) {
    throw new Error('OnFocusChange handler already set on searchbox.');
  }
  searchboxApiHandle.onfocuschange = speech.onOmniboxFocused;
  const dialog = $(view.DIALOG_ID_);
  if (dialog) {
    // When the overlay dialog closes, announce it to screen readers and
    // return keyboard focus to the microphone icon.
    dialog.addEventListener('close', () => {
      speech.screenReaderAnnounce_(translatedStrings.voiceSearchClosed);
      fakeboxMicrophoneElem.focus();
    });
  }

  // Initialize speech internal state.
  speech.googleBaseUrl_ = googleBaseUrl;
  speech.messages = {
    audioError: translatedStrings.audioError,
    details: translatedStrings.details,
    languageError: translatedStrings.languageError,
    learnMore: translatedStrings.learnMore,
    listening: translatedStrings.listening,
    networkError: translatedStrings.networkError,
    noTranslation: translatedStrings.noTranslation,
    noVoice: translatedStrings.noVoice,
    otherError: translatedStrings.otherError,
    permissionError: translatedStrings.permissionError,
    ready: translatedStrings.ready,
    tryAgain: translatedStrings.tryAgain,
    waiting: translatedStrings.waiting,
  };
  view.init(speech.onClick_);
  view.setTitles(translatedStrings);
  speech.initWebkitSpeech_();
  speech.reset_();
};
356
/**
 * Initializes and configures the speech recognition API, wiring this
 * module's handlers to the recognition lifecycle events.
 * @private
 */
speech.initWebkitSpeech_ = function() {
  const recognition = new webkitSpeechRecognition();
  recognition.continuous = false;
  recognition.interimResults = true;
  recognition.lang = getChromeUILanguage();
  recognition.onaudiostart = speech.handleRecognitionAudioStart_;
  recognition.onend = speech.handleRecognitionEnd_;
  recognition.onerror = speech.handleRecognitionError_;
  recognition.onnomatch = speech.handleRecognitionOnNoMatch_;
  recognition.onresult = speech.handleRecognitionResult_;
  recognition.onspeechstart = speech.handleRecognitionSpeechStart_;
  speech.recognition_ = recognition;
};
373
/**
 * Sets up the necessary states for voice search and then starts the
 * speech recognition interface.
 */
speech.start = function() {
  view.show();

  speech.resetIdleTimer_(speech.IDLE_TIMEOUT_MS_);

  document.addEventListener(
      'webkitvisibilitychange', speech.onVisibilityChange_, false);

  // Initialize |speech.recognition_| if it isn't already.
  if (!speech.recognition_) {
    speech.initWebkitSpeech_();
  }

  // Starts recognition and records the state transition. Throws if
  // |speech.start()| is called too soon after |speech.stop()|, before the
  // recognition interface has reset.
  const startRecognition = function() {
    speech.recognition_.start();
    speech.currentState_ = speech.State_.STARTED;
  };

  try {
    startRecognition();
  } catch (error) {
    // Hard-reset the recognition interface and retry once; give up and
    // close the UI if that also fails.
    speech.initWebkitSpeech_();
    try {
      startRecognition();
    } catch (error2) {
      speech.stop();
    }
  }
};
407
/**
 * Hides the overlay and resets the speech state back to READY.
 */
speech.stop = function() {
  // Aborting may surface an 'aborted' error from the API; note that
  // |speech.onErrorReceived_| deliberately does not display ABORTED errors.
  speech.recognition_.abort();
  speech.currentState_ = speech.State_.STOPPED;
  view.hide();
  speech.reset_();
};
417
/**
 * Resets the internal state to the READY state: cancels all pending
 * timers, detaches the visibility listener and clears stored transcripts.
 * @private
 */
speech.reset_ = function() {
  window.clearTimeout(speech.errorTimer_);
  window.clearTimeout(speech.idleTimer_);

  document.removeEventListener(
      'webkitvisibilitychange', speech.onVisibilityChange_, false);

  speech.finalResult_ = '';
  speech.interimResult_ = '';
  speech.usingKeyboardNavigation_ = false;
  speech.currentState_ = speech.State_.READY;
};
434
/**
 * Informs the view that the browser is receiving audio input. Also invoked
 * without an event from |speech.handleRecognitionResult_| to re-synthesize
 * a lost |onaudiostart| notification, hence the optional parameter.
 * @param {Event=} opt_event Emitted event for audio start. Unused.
 * @private
 */
speech.handleRecognitionAudioStart_ = function(opt_event) {
  speech.resetIdleTimer_(speech.IDLE_TIMEOUT_MS_);
  speech.currentState_ = speech.State_.AUDIO_RECEIVED;
  view.setReadyForSpeech();
};

/**
 * Function is called when the user starts speaking. Also invoked without an
 * event from |speech.handleRecognitionResult_| to re-synthesize a lost
 * |onspeechstart| notification.
 * @param {Event=} opt_event Emitted event for speech start. Unused.
 * @private
 */
speech.handleRecognitionSpeechStart_ = function(opt_event) {
  speech.resetIdleTimer_(speech.IDLE_TIMEOUT_MS_);
  speech.currentState_ = speech.State_.SPEECH_RECEIVED;
  view.setReceivingSpeech();
};
456
/**
 * Processes the recognition results arriving from the Web Speech API.
 * Updates the view with interim/final transcripts, submits the query once a
 * final transcript arrives, and force-stops transcripts that grow past
 * |speech.QUERY_LENGTH_LIMIT_|.
 * @param {SpeechRecognitionEvent} responseEvent Event coming from the API.
 * @private
 */
speech.handleRecognitionResult_ = function(responseEvent) {
  speech.resetIdleTimer_(speech.IDLE_TIMEOUT_MS_);

  switch (speech.currentState_) {
    case speech.State_.RESULT_RECEIVED:
    case speech.State_.SPEECH_RECEIVED:
      // Normal, expected states for processing results.
      break;
    case speech.State_.AUDIO_RECEIVED:
      // Network bugginess (the onaudiostart packet was lost).
      // Re-synthesize the missed transition before processing results.
      speech.handleRecognitionSpeechStart_();
      break;
    case speech.State_.STARTED:
      // Network bugginess (the onspeechstart packet was lost).
      // Re-synthesize both missed transitions before processing results.
      speech.handleRecognitionAudioStart_();
      speech.handleRecognitionSpeechStart_();
      break;
    default:
      // Not expecting results in any other states.
      return;
  }

  const results = responseEvent.results;
  if (results.length == 0) {
    return;
  }
  speech.currentState_ = speech.State_.RESULT_RECEIVED;
  speech.interimResult_ = '';
  speech.finalResult_ = '';

  const finalResult = results[responseEvent.resultIndex];
  // Process final results: display the final transcript and submit it.
  if (finalResult.isFinal) {
    speech.finalResult_ = finalResult[0].transcript;
    view.updateSpeechResult(speech.finalResult_, speech.finalResult_);

    speech.submitFinalResult_();
    return;
  }

  // Process interim results: concatenate all transcripts, keeping only the
  // confident ones (above the confidence threshold) as the tentative query.
  for (let j = 0; j < results.length; j++) {
    const result = results[j][0];
    speech.interimResult_ += result.transcript;
    if (result.confidence > speech.RECOGNITION_CONFIDENCE_THRESHOLD_) {
      speech.finalResult_ += result.transcript;
    }
  }
  view.updateSpeechResult(speech.interimResult_, speech.finalResult_);

  // Force-stop long queries: submit if any confident transcript exists,
  // otherwise give up with a NO_MATCH error.
  if (speech.interimResult_.length > speech.QUERY_LENGTH_LIMIT_) {
    if (speech.finalResult_) {
      speech.submitFinalResult_();
    } else {
      speech.onErrorReceived_(RecognitionError.NO_MATCH);
    }
  }
};
521
/**
 * Convert a |RecognitionError| to a |LOG_TYPE| error constant,
 * for UMA logging.
 * @param {RecognitionError} error The received error.
 * @return {number} The matching |LOG_TYPE| error constant;
 *     |LOG_TYPE.ERROR_OTHER| for unmapped errors.
 * @private
 */
speech.errorToLogType_ = function(error) {
  const logTypes = new Map([
    [RecognitionError.ABORTED, LOG_TYPE.ERROR_ABORTED],
    [RecognitionError.AUDIO_CAPTURE, LOG_TYPE.ERROR_AUDIO_CAPTURE],
    [RecognitionError.BAD_GRAMMAR, LOG_TYPE.ERROR_BAD_GRAMMAR],
    [
      RecognitionError.LANGUAGE_NOT_SUPPORTED,
      LOG_TYPE.ERROR_LANGUAGE_NOT_SUPPORTED
    ],
    [RecognitionError.NETWORK, LOG_TYPE.ERROR_NETWORK],
    [RecognitionError.NO_MATCH, LOG_TYPE.ERROR_NO_MATCH],
    [RecognitionError.NO_SPEECH, LOG_TYPE.ERROR_NO_SPEECH],
    [RecognitionError.NOT_ALLOWED, LOG_TYPE.ERROR_NOT_ALLOWED],
    [RecognitionError.SERVICE_NOT_ALLOWED, LOG_TYPE.ERROR_SERVICE_NOT_ALLOWED]
  ]);
  // Use |has()| rather than truthiness: both enums contain falsy-adjacent
  // numeric values and an unmapped error must fall back to ERROR_OTHER.
  return logTypes.has(error) ? logTypes.get(error) : LOG_TYPE.ERROR_OTHER;
};
552
/**
 * Handles state transition for the controller when an error occurs
 * during speech recognition. ABORTED errors are logged but not displayed,
 * since aborting is triggered deliberately (see |speech.stop()|).
 * @param {RecognitionError} error The appropriate error state from
 *     the RecognitionError enum.
 * @private
 */
speech.onErrorReceived_ = function(error) {
  speech.logEvent(speech.errorToLogType_(error));
  speech.resetIdleTimer_(speech.IDLE_TIMEOUT_MS_);
  // Note: the timeout duration is recorded for every error, including
  // ABORTED, even though only displayed errors schedule the error timer.
  speech.errorTimeoutMs_ = speech.getRecognitionErrorTimeout_(error);
  if (error != RecognitionError.ABORTED) {
    speech.currentState_ = speech.State_.ERROR_RECEIVED;
    view.showError(error);
    // The error timer supersedes the idle timer while the error is shown.
    window.clearTimeout(speech.idleTimer_);
    speech.resetErrorTimer_(speech.errorTimeoutMs_);
  }
};
571
/**
 * Called when an error from Web Speech API is received. Translates the
 * API's error string into a |RecognitionError| and hands it to the
 * common error path.
 * @param {SpeechRecognitionError} error The error event.
 * @private
 */
speech.handleRecognitionError_ = function(error) {
  const recognitionError = speech.getRecognitionError_(error.error);
  speech.onErrorReceived_(recognitionError);
};
580
/**
 * Stops speech recognition when no matches are found.
 * @private
 */
speech.handleRecognitionOnNoMatch_ = function() {
  speech.onErrorReceived_(RecognitionError.NO_MATCH);
};

/**
 * Stops the UI when the Web Speech API reports that it has halted speech
 * recognition. Derives an error from how far recognition progressed,
 * displays it if no error is showing yet, and enters the STOPPED state.
 * @private
 */
speech.handleRecognitionEnd_ = function() {
  window.clearTimeout(speech.idleTimer_);

  let error;
  switch (speech.currentState_) {
    case speech.State_.STARTED:
      // Recognition ended before any audio was captured.
      error = RecognitionError.AUDIO_CAPTURE;
      break;
    case speech.State_.AUDIO_RECEIVED:
      // Audio was captured, but no speech was detected.
      error = RecognitionError.NO_SPEECH;
      break;
    case speech.State_.SPEECH_RECEIVED:
    case speech.State_.RESULT_RECEIVED:
      // Speech was detected but never produced a submittable query.
      error = RecognitionError.NO_MATCH;
      break;
    case speech.State_.ERROR_RECEIVED:
      error = RecognitionError.OTHER;
      break;
    default:
      // Ending in any other state (e.g. READY, STOPPED) is not an error.
      return;
  }

  // If error has not yet been displayed.
  if (speech.currentState_ != speech.State_.ERROR_RECEIVED) {
    view.showError(error);
    speech.resetErrorTimer_(speech.ERROR_TIMEOUT_LONG_MS_);
  }
  speech.currentState_ = speech.State_.STOPPED;
};
623
/**
 * Determines whether the user's browser is probably running on a Mac.
 * @return {boolean} True iff the user's browser is running on a Mac.
 * @private
 */
speech.isUserAgentMac_ = function() {
  const {userAgent} = window.navigator;
  return userAgent.includes('Macintosh');
};
632
/**
 * Determines, if the given KeyboardEvent |code| is a space or enter key
 * (either the main or the numpad enter).
 * @param {string} code A KeyboardEvent's |code| property.
 * @return True, iff the code represents a space or enter key.
 * @private
 */
speech.isSpaceOrEnter_ = function(code) {
  const activationCodes =
      [KEYCODE.ENTER, KEYCODE.NUMPAD_ENTER, KEYCODE.SPACE];
  return activationCodes.includes(code);
};
649
/**
 * Determines if the given event's target id is for a button or navigation link
 * (the "Try Again" link, the support link or the close button).
 * @param {string} id An event's target id.
 * @return True, iff the id is for a button or link.
 * @private
 */
speech.isButtonOrLink_ = function(id) {
  const clickTargetIds =
      [text.RETRY_LINK_ID, text.SUPPORT_LINK_ID, view.CLOSE_BUTTON_ID];
  return clickTargetIds.includes(id);
};
666
/**
 * Handles the following keyboard actions.
 * - <CTRL> + <SHIFT> + <.> starts voice input (<CMD> + <SHIFT> + <.> on mac).
 * - <ESC> aborts voice input when the recognition interface is active.
 * - <ENTER> or <SPACE> interprets as a click if the target is a button or
 *   navigation link, otherwise it submits the speech query if there is one,
 *   or closes the overlay if there is none.
 * - <TAB> marks that the user is navigating by keyboard (see
 *   |speech.usingKeyboardNavigation_|).
 * @param {!Event} event The keydown event.
 */
speech.onKeyDown = function(event) {
  if (speech.isUiDefinitelyHidden_()) {
    // UI hidden: only the activation shortcut is relevant.
    const ctrlKeyPressed =
        event.ctrlKey || (speech.isUserAgentMac_() && event.metaKey);
    if (speech.currentState_ == speech.State_.READY &&
        event.code == KEYCODE.PERIOD && event.shiftKey && ctrlKeyPressed) {
      speech.logEvent(LOG_TYPE.ACTION_ACTIVATE_KEYBOARD);
      speech.start();
    }
  } else {
    // Ensures that keyboard events are not propagated during voice input.
    event.stopPropagation();

    if (event.code == KEYCODE.TAB) {
      speech.usingKeyboardNavigation_ = true;
    } else if (speech.isSpaceOrEnter_(event.code)) {
      if (event.target != null && speech.isButtonOrLink_(event.target.id)) {
        // Treat <SPACE>/<ENTER> on a focused link/button as a click.
        view.onWindowClick_(event);
      } else if (speech.finalResult_) {
        speech.submitFinalResult_();
      } else {
        speech.logEvent(LOG_TYPE.ACTION_CLOSE_OVERLAY);
        speech.stop();
      }
    } else if (event.code == KEYCODE.ESC) {
      speech.logEvent(LOG_TYPE.ACTION_CLOSE_OVERLAY);
      speech.stop();
    }
  }
};
705
/**
 * Displays the no match error if no interactions occur after some time while
 * the interface is active. This is a safety net in case the onend event
 * doesn't fire, or the user has persistent noise in the background, and does
 * not speak. If a high confidence transcription was received, then this
 * submits the search query instead of displaying an error.
 * @private
 */
speech.onIdleTimeout_ = function() {
  if (speech.finalResult_) {
    speech.submitFinalResult_();
    return;
  }

  // Any state with active recognition or a displayed error times out
  // with a NO_MATCH error; all other states are left untouched.
  const timeoutStates = [
    speech.State_.STARTED, speech.State_.AUDIO_RECEIVED,
    speech.State_.SPEECH_RECEIVED, speech.State_.RESULT_RECEIVED,
    speech.State_.ERROR_RECEIVED
  ];
  if (timeoutStates.includes(speech.currentState_)) {
    speech.onErrorReceived_(RecognitionError.NO_MATCH);
  }
};
730
/**
 * Aborts the speech recognition interface when the user switches to a new
 * tab or window while the voice UI may be showing.
 * @private
 */
speech.onVisibilityChange_ = function() {
  if (!speech.isUiDefinitelyHidden_() && document.webkitHidden) {
    speech.stop();
  }
};
745
/**
 * Aborts the speech session if the UI is showing and omnibox gets focused. Does
 * not abort if the user is using keyboard navigation (i.e. tab).
 */
speech.onOmniboxFocused = function() {
  if (!speech.isUiDefinitelyHidden_() && !speech.usingKeyboardNavigation_) {
    speech.logEvent(LOG_TYPE.ACTION_CLOSE_OVERLAY);
    speech.stop();
  }
};

/**
 * Change the location of this tab to the new URL. Used for query submission.
 * @param {!URL} url The URL to navigate to.
 * @private
 */
speech.navigateToUrl_ = function(url) {
  window.location.href = url.href;
};

/**
 * Submits the final spoken speech query to perform a search: builds
 * |speech.googleBaseUrl_|/search?q=<query>&gs_ivs=1 and navigates there.
 * @throws {Error} If there is no final query to submit.
 * @private
 */
speech.submitFinalResult_ = function() {
  window.clearTimeout(speech.idleTimer_);
  if (!speech.finalResult_) {
    throw new Error('Submitting empty query.');
  }

  // Add the encoded query. Getting |speech.finalResult_| needs to happen
  // before stopping speech, because |speech.stop()| clears it via
  // |speech.reset_()|.
  const searchParams = new URLSearchParams();
  searchParams.append('q', speech.finalResult_);
  // Add a parameter to indicate that this request is a voice search.
  searchParams.append('gs_ivs', '1');

  // Build the query URL.
  const queryUrl = new URL('/search', speech.googleBaseUrl_);
  queryUrl.search = searchParams.toString();

  speech.logEvent(LOG_TYPE.ACTION_QUERY_SUBMITTED);
  speech.stop();
  speech.navigateToUrl_(queryUrl);
};
791
/**
 * Returns the error type based on the error string received from the webkit
 * speech recognition API.
 * @param {string} error The error string received from the webkit speech
 *     recognition API.
 * @return {RecognitionError} The appropriate error state from
 *     the RecognitionError enum.
 * @private
 */
speech.getRecognitionError_ = function(error) {
  const recognitionErrors = new Map([
    ['aborted', RecognitionError.ABORTED],
    ['audio-capture', RecognitionError.AUDIO_CAPTURE],
    ['bad-grammar', RecognitionError.BAD_GRAMMAR],
    ['language-not-supported', RecognitionError.LANGUAGE_NOT_SUPPORTED],
    ['network', RecognitionError.NETWORK],
    ['no-speech', RecognitionError.NO_SPEECH],
    ['not-allowed', RecognitionError.NOT_ALLOWED],
    ['service-not-allowed', RecognitionError.SERVICE_NOT_ALLOWED]
  ]);
  // |RecognitionError.NO_SPEECH| is 0 (falsy), so membership must be
  // checked with |has()| rather than a truthiness test on |get()|.
  return recognitionErrors.has(error) ? recognitionErrors.get(error) :
                                        RecognitionError.OTHER;
};
823
/**
 * Returns a timeout based on the error received from the webkit speech
 * recognition API. Errors that display a click-target get the long timeout;
 * everything else gets the short one.
 * @param {RecognitionError} error An error from the RecognitionError enum.
 * @return {number} The appropriate timeout duration for displaying the error.
 * @private
 */
speech.getRecognitionErrorTimeout_ = function(error) {
  const longTimeoutErrors = [
    RecognitionError.AUDIO_CAPTURE, RecognitionError.NO_SPEECH,
    RecognitionError.NOT_ALLOWED, RecognitionError.SERVICE_NOT_ALLOWED,
    RecognitionError.NO_MATCH
  ];
  return longTimeoutErrors.includes(error) ? speech.ERROR_TIMEOUT_LONG_MS_ :
                                             speech.ERROR_TIMEOUT_SHORT_MS_;
};
843
/**
 * Resets the idle state timeout. Any previously scheduled idle timeout
 * is cancelled first.
 * @param {number} duration The duration after which to close the UI.
 * @private
 */
speech.resetIdleTimer_ = function(duration) {
  window.clearTimeout(speech.idleTimer_);
  speech.idleTimer_ = window.setTimeout(speech.onIdleTimeout_, duration);
};

/**
 * Resets the idle error state timeout. Any previously scheduled error
 * timeout is cancelled first; |speech.stop| runs when the timer fires.
 * @param {number} duration The duration after which to close the UI during an
 *     error state.
 * @private
 */
speech.resetErrorTimer_ = function(duration) {
  window.clearTimeout(speech.errorTimer_);
  speech.errorTimer_ = window.setTimeout(speech.stop, duration);
};
864
/**
 * Check to see if the speech recognition interface is running, and has
 * received any results (i.e. is in the RESULT_RECEIVED state).
 * @return {boolean} True, if the speech recognition interface is running,
 *     and has received any results.
 */
speech.hasReceivedResults = function() {
  return speech.currentState_ == speech.State_.RESULT_RECEIVED;
};
874
/**
 * Check to see if the speech recognition interface is running, i.e. the
 * controller is between STARTED and RESULT_RECEIVED (inclusive).
 * @return {boolean} True, if the speech recognition interface is running.
 */
speech.isRecognizing = function() {
  const recognizingStates = [
    speech.State_.STARTED, speech.State_.AUDIO_RECEIVED,
    speech.State_.SPEECH_RECEIVED, speech.State_.RESULT_RECEIVED
  ];
  return recognizingStates.includes(speech.currentState_);
};
889
/**
 * Check if the controller is in a state where the UI is definitely hidden.
 * Since we show the UI for a few seconds after we receive an error from the
 * API, we need a separate definition to |speech.isRecognizing()| to indicate
 * when the UI is hidden. <strong>Note:</strong> that if this function
 * returns false, it might not necessarily mean that the UI is visible.
 * @return {boolean} True if the UI is hidden.
 * @private
 */
speech.isUiDefinitelyHidden_ = function() {
  return speech.currentState_ == speech.State_.READY ||
      speech.currentState_ == speech.State_.UNINITIALIZED;
};
907
/**
 * Handles click events during speech recognition, as reported by the view.
 * @param {boolean} shouldSubmit True if a query should be submitted.
 * @param {boolean} shouldRetry True if the interface should be restarted.
 * @param {boolean} navigatingAway True if the browser is navigating away
 *     from the NTP.
 * @private
 */
speech.onClick_ = function(shouldSubmit, shouldRetry, navigatingAway) {
  if (speech.finalResult_ && shouldSubmit) {
    speech.submitFinalResult_();
  } else if (speech.currentState_ == speech.State_.STOPPED && shouldRetry) {
    speech.reset_();
    speech.start();
  } else if (speech.currentState_ == speech.State_.STOPPED && navigatingAway) {
    // If the user clicks on a "Learn more" or "Details" support page link
    // from an error message, do nothing, and let Chrome navigate to that page.
  } else {
    // Any other click closes the overlay.
    speech.logEvent(LOG_TYPE.ACTION_CLOSE_OVERLAY);
    speech.stop();
  }
};
930
/**
 * Announces the given message through the screen reader announcer element.
 * @param {string} message The message to announce.
 * @private
 */
speech.screenReaderAnnounce_ = function(message) {
  const announcer = $(speech.SCREEN_READER_ANNOUNCER_);
  // Clear the element first, then set the message after a short delay —
  // presumably so assistive technology registers the content change and
  // repeated messages get re-announced (TODO: confirm).
  announcer.innerText = '';
  setTimeout(function() {
    announcer.innerText = message;
  }, 100);
};
942
943/* TEXT VIEW */
944
/**
 * Provides methods for styling and animating the text areas
 * left of the microphone button.
 */
const text = {};

/**
 * ID for the "Try Again" link shown in error output.
 * @const {string}
 */
text.RETRY_LINK_ID = 'voice-retry-link';

/**
 * ID for the Voice Search support site link shown in error output.
 * @const {string}
 */
text.SUPPORT_LINK_ID = 'voice-support-link';

/**
 * Class for the links shown in error output.
 * @const @private {string}
 */
text.ERROR_LINK_CLASS_ = 'voice-text-link';

/**
 * Class name for the speech recognition result output area.
 * @const @private {string}
 */
text.TEXT_AREA_CLASS_ = 'voice-text';

/**
 * Class name for the "Listening..." text animation.
 * @const @private {string}
 */
text.LISTENING_ANIMATION_CLASS_ = 'listening-animation';

/**
 * ID of the final / high confidence speech recognition results element.
 * @const @private {string}
 */
text.FINAL_TEXT_AREA_ID_ = 'voice-text-f';

/**
 * ID of the interim / low confidence speech recognition results element.
 * @const @private {string}
 */
text.INTERIM_TEXT_AREA_ID_ = 'voice-text-i';

/**
 * The line height of the speech recognition results text.
 * @const @private {number}
 */
text.LINE_HEIGHT_ = 1.2;

/**
 * Font size in the full page view in pixels.
 * @const @private {number}
 */
text.FONT_SIZE_ = 32;

/**
 * Delay in milliseconds before showing the initializing message.
 * @const @private {number}
 */
text.INITIALIZING_TIMEOUT_MS_ = 300;

/**
 * Delay in milliseconds before showing the listening message.
 * @const @private {number}
 */
text.LISTENING_TIMEOUT_MS_ = 2000;

/**
 * Base link target for help regarding voice search. To be appended
 * with a locale string for proper target site localization.
 * @const @private {string}
 */
text.SUPPORT_LINK_BASE_ =
    'https://support.google.com/chrome/?p=ui_voice_search&hl=';

/**
 * The final / high confidence speech recognition result element.
 * Set in |text.init()|.
 * @private {Element}
 */
text.final_;

/**
 * The interim / low confidence speech recognition result element.
 * Set in |text.init()|.
 * @private {Element}
 */
text.interim_;

/**
 * Stores the ID of the initializing message timer.
 * @private {number}
 */
text.initializingTimer_;

/**
 * Stores the ID of the listening message timer.
 * @private {number}
 */
text.listeningTimer_;
1048
1049/**
1050 * Finds the text view elements.
1051 */
1052text.init = function() {
1053 text.final_ = $(text.FINAL_TEXT_AREA_ID_);
1054 text.interim_ = $(text.INTERIM_TEXT_AREA_ID_);
1055 text.clear();
1056};
1057
1058/**
1059 * Updates the text elements with new recognition results.
1060 * @param {string} interimText Low confidence speech recognition result text.
1061 * @param {string} opt_finalText High confidence speech recognition result
1062 * text, defaults to an empty string.
1063 */
1064text.updateTextArea = function(interimText, opt_finalText = '') {
1065 window.clearTimeout(text.initializingTimer_);
1066 text.clearListeningTimeout();
1067
1068 text.interim_.textContent = interimText;
1069 text.final_.textContent = opt_finalText;
1070
1071 text.interim_.className = text.final_.className = text.getTextClassName_();
1072};
1073
1074/**
1075 * Sets the text view to the initializing state. The initializing message
1076 * shown while waiting for permission is not displayed immediately, but after
1077 * a short timeout. The reason for this is that the "Waiting..." message would
1078 * still appear ("blink") every time a user opens Voice Search, even if they
1079 * have already granted and persisted microphone permission for the NTP,
1080 * and could therefore directly proceed to the "Speak now" message.
1081 */
1082text.showInitializingMessage = function() {
1083 text.interim_.textContent = '';
1084 text.final_.textContent = '';
1085
1086 const displayMessage = function() {
1087 if (text.interim_.textContent == '') {
1088 text.updateTextArea(speech.messages.waiting);
1089 }
1090 };
1091 text.initializingTimer_ =
1092 window.setTimeout(displayMessage, text.INITIALIZING_TIMEOUT_MS_);
1093};
1094
1095/**
1096 * Sets the text view to the ready state.
1097 */
1098text.showReadyMessage = function() {
1099 window.clearTimeout(text.initializingTimer_);
1100 text.clearListeningTimeout();
1101 text.updateTextArea(speech.messages.ready);
1102 text.startListeningMessageAnimation_();
1103};
1104
1105/**
1106 * Display an error message in the text area for the given error.
1107 * @param {RecognitionError} error The error that occured.
1108 */
1109text.showErrorMessage = function(error) {
1110 text.updateTextArea(text.getErrorMessage_(error));
1111
1112 const linkElement = text.getErrorLink_(error);
1113 // Setting textContent removes all children (no need to clear link elements).
1114 if (linkElement) {
1115 text.interim_.textContent += ' ';
1116 text.interim_.appendChild(linkElement);
1117 }
1118};
1119
1120/**
1121 * Returns an error message based on the error.
1122 * @param {RecognitionError} error The error that occured.
1123 * @private
1124 */
1125text.getErrorMessage_ = function(error) {
1126 switch (error) {
1127 case RecognitionError.NO_MATCH:
1128 return speech.messages.noTranslation;
1129 case RecognitionError.NO_SPEECH:
1130 return speech.messages.noVoice;
1131 case RecognitionError.AUDIO_CAPTURE:
1132 return speech.messages.audioError;
1133 case RecognitionError.NETWORK:
1134 return speech.messages.networkError;
1135 case RecognitionError.NOT_ALLOWED:
1136 case RecognitionError.SERVICE_NOT_ALLOWED:
1137 return speech.messages.permissionError;
1138 case RecognitionError.LANGUAGE_NOT_SUPPORTED:
1139 return speech.messages.languageError;
1140 default:
1141 return speech.messages.otherError;
1142 }
1143};
1144
1145/**
1146 * Returns an error message help link based on the error.
1147 * @param {RecognitionError} error The error that occured.
1148 * @private
1149 */
1150text.getErrorLink_ = function(error) {
1151 const linkElement = document.createElement('a');
1152 linkElement.className = text.ERROR_LINK_CLASS_;
1153
1154 switch (error) {
1155 case RecognitionError.NO_MATCH:
1156 linkElement.id = text.RETRY_LINK_ID;
1157 linkElement.tabIndex = '0';
1158 linkElement.textContent = speech.messages.tryAgain;
1159 // When clicked, |view.onWindowClick_| gets called.
1160 return linkElement;
1161 case RecognitionError.NO_SPEECH:
1162 case RecognitionError.AUDIO_CAPTURE:
1163 linkElement.id = text.SUPPORT_LINK_ID;
1164 linkElement.href = text.SUPPORT_LINK_BASE_ + getChromeUILanguage();
1165 linkElement.textContent = speech.messages.learnMore;
1166 linkElement.target = '_blank';
1167 return linkElement;
1168 case RecognitionError.NOT_ALLOWED:
1169 case RecognitionError.SERVICE_NOT_ALLOWED:
1170 linkElement.id = text.SUPPORT_LINK_ID;
1171 linkElement.href = text.SUPPORT_LINK_BASE_ + getChromeUILanguage();
1172 linkElement.textContent = speech.messages.details;
1173 linkElement.target = '_blank';
1174 return linkElement;
1175 default:
1176 return null;
1177 }
1178};
1179
1180/**
1181 * Clears the text elements.
1182 */
1183text.clear = function() {
1184 text.updateTextArea('');
1185
1186 text.clearListeningTimeout();
1187 window.clearTimeout(text.initializingTimer_);
1188
1189 text.interim_.className = text.TEXT_AREA_CLASS_;
1190 text.final_.className = text.TEXT_AREA_CLASS_;
1191};
1192
1193/**
1194 * Cancels listening message display.
1195 */
1196text.clearListeningTimeout = function() {
1197 window.clearTimeout(text.listeningTimer_);
1198};
1199
1200/**
1201 * Determines the class name of the text output Elements.
1202 * @return {string} The class name.
1203 * @private
1204 */
1205text.getTextClassName_ = function() {
1206 // Shift up for every line.
1207 const oneLineHeight = text.LINE_HEIGHT_ * text.FONT_SIZE_ + 1;
1208 const twoLineHeight = text.LINE_HEIGHT_ * text.FONT_SIZE_ * 2 + 1;
1209 const threeLineHeight = text.LINE_HEIGHT_ * text.FONT_SIZE_ * 3 + 1;
1210 const fourLineHeight = text.LINE_HEIGHT_ * text.FONT_SIZE_ * 4 + 1;
1211
1212 const height = text.interim_.scrollHeight;
1213 let className = text.TEXT_AREA_CLASS_;
1214
1215 if (height > fourLineHeight) {
1216 className += ' voice-text-5l';
1217 } else if (height > threeLineHeight) {
1218 className += ' voice-text-4l';
1219 } else if (height > twoLineHeight) {
1220 className += ' voice-text-3l';
1221 } else if (height > oneLineHeight) {
1222 className += ' voice-text-2l';
1223 }
1224 return className;
1225};
1226
1227/**
1228 * Displays the listening message animation after the ready message has been
1229 * shown for |text.LISTENING_TIMEOUT_MS_| milliseconds without further user
1230 * action.
1231 * @private
1232 */
1233text.startListeningMessageAnimation_ = function() {
1234 const animateListeningText = function() {
1235 // If speech is active with no results yet, show the message and animation.
1236 if (speech.isRecognizing() && !speech.hasReceivedResults()) {
1237 text.updateTextArea(speech.messages.listening);
1238 text.interim_.classList.add(text.LISTENING_ANIMATION_CLASS_);
1239 }
1240 };
1241
1242 text.listeningTimer_ =
1243 window.setTimeout(animateListeningText, text.LISTENING_TIMEOUT_MS_);
1244};
1245
/* END TEXT VIEW */

/* MICROPHONE VIEW */

/**
 * Provides methods for animating the microphone button and icon
 * on the Voice Search full screen overlay.
 */
const microphone = {};

/**
 * ID for the button Element.
 * @const
 */
microphone.RED_BUTTON_ID = 'voice-button';

/**
 * ID for the level animations Element that indicates input volume.
 * @const @private
 */
microphone.LEVEL_ID_ = 'voice-level';

/**
 * ID for the container of the microphone, red button and level animations.
 * @const @private
 */
microphone.CONTAINER_ID_ = 'voice-button-container';

/**
 * The minimum transform scale for the volume rings (unitless CSS
 * transform scale() factor).
 * @const @private
 */
microphone.LEVEL_SCALE_MINIMUM_ = 0.5;

/**
 * The range of the transform scale for the volume rings; each animation
 * step picks a random scale in [MINIMUM, MINIMUM + RANGE].
 * @const @private
 */
microphone.LEVEL_SCALE_RANGE_ = 0.55;

/**
 * The minimum transition time (in milliseconds) for the volume rings.
 * @const @private
 */
microphone.LEVEL_TIME_STEP_MINIMUM_ = 170;

/**
 * The range (in milliseconds) of the transition time for the volume rings.
 * @const @private
 */
microphone.LEVEL_TIME_STEP_RANGE_ = 10;

/**
 * The button with the microphone icon. Assigned in |microphone.init|.
 * @private {Element}
 */
microphone.button_;

/**
 * The voice level element that is displayed when the user starts speaking.
 * Assigned in |microphone.init|.
 * @private {Element}
 */
microphone.level_;

/**
 * Variable to indicate whether level animations are underway; setting it to
 * false makes the animation loop exit on its next tick.
 * @private {boolean}
 */
microphone.isLevelAnimating_ = false;
1315
1316/**
1317 * Creates/finds the output elements for the microphone rendering and animation.
1318 */
1319microphone.init = function() {
1320 // Get the button element and microphone container.
1321 microphone.button_ = $(microphone.RED_BUTTON_ID);
1322
1323 // Get the animation elements.
1324 microphone.level_ = $(microphone.LEVEL_ID_);
1325};
1326
1327/**
1328 * Starts the volume circles animations, if it has not started yet.
1329 */
1330microphone.startInputAnimation = function() {
1331 if (!microphone.isLevelAnimating_) {
1332 microphone.isLevelAnimating_ = true;
1333 microphone.runLevelAnimation_();
1334 }
1335};
1336
1337/**
1338 * Stops the volume circles animations.
1339 */
1340microphone.stopInputAnimation = function() {
1341 microphone.isLevelAnimating_ = false;
1342};
1343
1344/**
1345 * Runs the volume level animation.
1346 * @private
1347 */
1348microphone.runLevelAnimation_ = function() {
1349 if (!microphone.isLevelAnimating_) {
1350 microphone.level_.style.removeProperty('opacity');
1351 microphone.level_.style.removeProperty('transition');
1352 microphone.level_.style.removeProperty('transform');
1353 return;
1354 }
1355 const scale = microphone.LEVEL_SCALE_MINIMUM_ +
1356 Math.random() * microphone.LEVEL_SCALE_RANGE_;
1357 const timeStep = Math.round(
1358 microphone.LEVEL_TIME_STEP_MINIMUM_ +
1359 Math.random() * microphone.LEVEL_TIME_STEP_RANGE_);
1360 microphone.level_.style.setProperty(
1361 'transition', 'transform ' + timeStep + 'ms ease-in-out');
1362 microphone.level_.style.setProperty('transform', 'scale(' + scale + ')');
1363 window.setTimeout(microphone.runLevelAnimation_, timeStep);
1364};
1365
/* END MICROPHONE VIEW */

/* VIEW */

/**
 * Provides methods for manipulating and animating the Voice Search
 * full screen overlay.
 */
const view = {};

/**
 * ID for the close button in the speech output container.
 * @const
 */
view.CLOSE_BUTTON_ID = 'voice-close-button';

/**
 * Class name of the speech recognition interface on the homepage.
 * @const @private
 */
view.OVERLAY_CLASS_ = 'overlay';

/**
 * Class name of the speech recognition interface when it is hidden on the
 * homepage.
 * @const @private
 */
view.OVERLAY_HIDDEN_CLASS_ = 'overlay-hidden';

/**
 * ID for the dialog that contains the speech recognition interface.
 * @const @private
 */
view.DIALOG_ID_ = 'voice-overlay-dialog';

/**
 * ID for the speech output background.
 * @const @private
 */
view.BACKGROUND_ID_ = 'voice-overlay';

/**
 * ID for the speech output container.
 * @const @private
 */
view.CONTAINER_ID_ = 'voice-outer';

/**
 * Class name used to modify the UI to the 'listening' state.
 * @const @private
 */
view.MICROPHONE_LISTENING_CLASS_ = 'outer voice-ml';

/**
 * Class name used to modify the UI to the 'receiving speech' state.
 * @const @private
 */
view.RECEIVING_SPEECH_CLASS_ = 'outer voice-rs';

/**
 * Class name used to modify the UI to the 'error received' state.
 * @const @private
 */
view.ERROR_RECEIVED_CLASS_ = 'outer voice-er';

/**
 * Class name used to modify the UI to the inactive state.
 * @const @private
 */
view.INACTIVE_CLASS_ = 'outer';

/**
 * Background element and container of all other elements. Assigned in
 * |view.init|.
 * @private {Element}
 */
view.background_;

/**
 * The container used to position the microphone and text output area.
 * Assigned in |view.init|.
 * @private {Element}
 */
view.container_;

/**
 * True if the last error message shown was for the 'no-match' error, in
 * which case a microphone click retries instead of submitting a query.
 * @private {boolean}
 */
view.isNoMatchShown_ = false;

/**
 * True if the UI elements are visible.
 * @private {boolean}
 */
view.isVisible_ = false;

/**
 * The function to call when there is a click event; invoked by
 * |view.onWindowClick_| as onClick_(submitQuery, shouldRetry, navigatingAway).
 * NOTE(review): |view.dialog_| is assigned in view.init() and used by
 * showView_/hideView_, but has no declaration in this section — consider
 * declaring it alongside |view.background_|.
 * @private {Function}
 */
view.onClick_;
1466
1467/**
1468 * Displays the UI.
1469 */
1470view.show = function() {
1471 if (!view.isVisible_) {
1472 text.showInitializingMessage();
1473 view.showView_();
1474 window.addEventListener('click', view.onWindowClick_, false);
1475 }
1476};
1477
1478/**
1479 * Sets the output area text to listening. This should only be called when
1480 * the Web Speech API starts receiving audio input (i.e., onaudiostart).
1481 */
1482view.setReadyForSpeech = function() {
1483 if (view.isVisible_) {
1484 view.container_.className = view.MICROPHONE_LISTENING_CLASS_;
1485 text.showReadyMessage();
1486 }
1487};
1488
1489/**
1490 * Shows the pulsing animation emanating from the microphone. This should only
1491 * be called when the Web Speech API starts receiving speech input (i.e.,
1492 * |onspeechstart|). Do note that this may also be run when the Web Speech API
1493 * is receiving speech recognition results (|onresult|), because |onspeechstart|
1494 * may not have been called.
1495 */
1496view.setReceivingSpeech = function() {
1497 if (view.isVisible_) {
1498 view.container_.className = view.RECEIVING_SPEECH_CLASS_;
1499 microphone.startInputAnimation();
1500 text.clearListeningTimeout();
1501 }
1502};
1503
1504/**
1505 * Updates the speech recognition results output with the latest results.
1506 * @param {string} interimResultText Low confidence recognition text (grey).
1507 * @param {string} finalResultText High confidence recognition text (black).
1508 */
1509view.updateSpeechResult = function(interimResultText, finalResultText) {
1510 if (view.isVisible_) {
1511 // If the Web Speech API is receiving speech recognition results
1512 // (|onresult|) and |onspeechstart| has not been called.
1513 if (view.container_.className != view.RECEIVING_SPEECH_CLASS_) {
1514 view.setReceivingSpeech();
1515 }
1516 text.updateTextArea(interimResultText, finalResultText);
1517 }
1518};
1519
1520/**
1521 * Hides the UI and stops animations.
1522 */
1523view.hide = function() {
1524 window.removeEventListener('click', view.onWindowClick_, false);
1525 view.stopMicrophoneAnimations_();
1526 view.hideView_();
1527 view.isNoMatchShown_ = false;
1528 text.clear();
1529};
1530
1531/**
1532 * Find the page elements that will be used to render the speech recognition
1533 * interface area.
1534 * @param {Function} onClick The function to call when there is a click event
1535 * in the window.
1536 */
1537view.init = function(onClick) {
1538 view.onClick_ = onClick;
1539
1540 view.dialog_ = $(view.DIALOG_ID_);
1541 view.background_ = $(view.BACKGROUND_ID_);
1542 view.container_ = $(view.CONTAINER_ID_);
1543
1544 text.init();
1545 microphone.init();
1546};
1547
1548/**
1549 * Sets accessibility titles/labels for the page elements.
1550 * @param {!Object} translatedStrings Dictionary of localized title strings.
1551 */
1552view.setTitles = function(translatedStrings) {
1553 const closeButton = $(view.CLOSE_BUTTON_ID);
1554 closeButton.title = translatedStrings.voiceCloseTooltip;
1555 closeButton.setAttribute('aria-label', translatedStrings.voiceCloseTooltip);
1556};
1557
1558/**
1559 * Displays an error message and stops animations.
1560 * @param {RecognitionError} error The error type.
1561 */
1562view.showError = function(error) {
1563 view.container_.className = view.ERROR_RECEIVED_CLASS_;
1564 text.showErrorMessage(error);
1565 view.stopMicrophoneAnimations_();
1566 view.isNoMatchShown_ = (error == RecognitionError.NO_MATCH);
1567};
1568
1569/**
1570 * Makes the view visible.
1571 * @private
1572 */
1573view.showView_ = function() {
1574 if (!view.isVisible_) {
1575 view.dialog_.showModal();
1576 view.background_.className = view.OVERLAY_HIDDEN_CLASS_;
1577 view.background_.className = view.OVERLAY_CLASS_;
1578 view.isVisible_ = true;
1579 }
1580};
1581
1582/**
1583 * Hides the view.
1584 * @private
1585 */
1586view.hideView_ = function() {
1587 view.dialog_.close();
1588 view.background_.className = view.OVERLAY_HIDDEN_CLASS_;
1589 view.container_.className = view.INACTIVE_CLASS_;
1590 view.background_.removeAttribute('style');
1591 view.isVisible_ = false;
1592};
1593
1594/**
1595 * Stops the animations in the microphone view.
1596 * @private
1597 */
1598view.stopMicrophoneAnimations_ = function() {
1599 microphone.stopInputAnimation();
1600};
1601
1602/**
1603 * Makes sure that a click anywhere closes the UI when it is active.
1604 * @param {!Event} event The click event.
1605 * @private
1606 */
1607view.onWindowClick_ = function(event) {
1608 if (!view.isVisible_) {
1609 return;
1610 }
1611 const retryLinkClicked = event.target.id === text.RETRY_LINK_ID;
1612 const supportLinkClicked = event.target.id === text.SUPPORT_LINK_ID;
1613 const micIconClicked = event.target.id === microphone.RED_BUTTON_ID;
1614
1615 const submitQuery = micIconClicked && !view.isNoMatchShown_;
1616 const shouldRetry =
1617 retryLinkClicked || (micIconClicked && view.isNoMatchShown_);
1618 const navigatingAway = supportLinkClicked;
1619
1620 speech.usingKeyboardNavigation_ = false;
1621
1622 if (shouldRetry) {
1623 if (micIconClicked) {
1624 speech.logEvent(LOG_TYPE.ACTION_TRY_AGAIN_MIC_BUTTON);
1625 } else if (retryLinkClicked) {
1626 speech.logEvent(LOG_TYPE.ACTION_TRY_AGAIN_LINK);
1627 }
1628 }
1629 if (supportLinkClicked) {
1630 speech.logEvent(LOG_TYPE.ACTION_SUPPORT_LINK_CLICKED);
1631 }
1632
1633 view.onClick_(submitQuery, shouldRetry, navigatingAway);
1634};
1635
1636/* END VIEW */