@cafedeaqua
Created May 18, 2017 22:40
How to use SpeechToText and how to get the N voice recognition text candidates from your voice
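The flow below: da.segment.onstart speaks a prompt and then starts recognition with da.SpeechToText; the onsuccess callback receives an array of recognized text candidates (the N results from your voice), which are joined into a single string; da.segment.onresume then reads the recognized text back and saves it to the host application's Timeline tab via da.addTimeline.
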
var speechText;

/**
 * The callback to prepare a segment for play.
 * @param {string} trigger The trigger type of a segment.
 * @param {object} args The input arguments.
 */
da.segment.onpreprocess = function (trigger, args) {
  console.log('[SpeechToText] onpreprocess', {trigger: trigger, args: args});
  speechText = "";
  da.startSegment(trigger, args);
};
/**
 * The callback to resume a segment for play.
 */
da.segment.onresume = function () {
  console.log('onresume');
  var synthesis = da.SpeechSynthesis.getInstance();
  if (speechText !== "") {
    var entry = {
      domain: "Input Speech Text",
      extension: {},
      title: speechText,
      url: "https://translate.google.co.jp/?hl=ja#en/ja/" + speechText,
      // imageUrl: "http://www.sony.net/SonyInfo/News/Press/201603/16-025E/img01.gif",
      date: new Date().toISOString()
    };
    da.addTimeline({entries: [entry]});
  }
  synthesis.speak('You said ' + speechText + '. This text is saved on the Host Application Timeline tab.', {
    onstart: function () {
      console.log('[SpeechToText] speak start');
    },
    onend: function () {
      console.log('[SpeechToText] speak onend');
      da.stopSegment();
    },
    onerror: function (error) {
      console.log('[SpeechToText] speak cancel: ' + error.message);
      da.stopSegment();
    }
  });
};
/**
 * The callback to start a segment.
 * @param {string} trigger The trigger type of a segment.
 * @param {object} args The input arguments.
 */
da.segment.onstart = function (trigger, args) {
  console.log('[SpeechToText] onstart', {trigger: trigger, args: args});
  var synthesis = da.SpeechSynthesis.getInstance();
  //var ttsText = SpeechRecogResult !== '' ? SpeechRecogResult : 'Launch text is empty';
  var ttsText = 'Please say anything';
  synthesis.speak(ttsText, {
    onstart: function () {
      console.log('[SpeechToText] speak start');
    },
    onend: function () {
      console.log('[SpeechToText] speak onend');
      // Start speech recognition once the prompt has finished playing.
      var speechToText = new da.SpeechToText();
      speechToText.startSpeechToText(callbackobject);
    },
    onerror: function (error) {
      console.log('[SpeechToText] speak cancel: ' + error.message);
      da.stopSegment();
    }
  });
};
/**
 * The callback object passed to startSpeechToText.
 */
var callbackobject = {
  onsuccess: function (results) {
    console.log('[SpeechToText] : SpeechToText process has finished successfully');
    console.log('[SpeechToText] : Results = ' + results);
    // "results" is an array of recognized text candidates; join them into one string.
    var strResults = results.join(" ");
    speechText = strResults;
  },
  onerror: function (error) {
    console.log('[SpeechToText] : SpeechToText error message = ' + error.message);
    console.log('[SpeechToText] : SpeechToText error code = ' + error.code);
    var synthesis = da.SpeechSynthesis.getInstance();
    synthesis.speak('The speech to text API could not recognize what you said. Reason is ' + error.message, {
      onstart: function () {
        console.log('[SpeechToText] error message speak start');
      },
      onend: function () {
        console.log('[SpeechToText] error message speak onend');
        da.stopSegment();
      },
      onerror: function (error) {
        console.log('[SpeechToText] error message speak cancel: ' + error.message);
        da.stopSegment();
      }
    });
  }
};
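
If you only need the single best transcription rather than all of the candidates, you can keep the first element of the results array instead of joining everything. A minimal sketch, assuming the candidates are ordered with the most likely one first (callbackTopResult is a hypothetical name, not part of the original gist):

var callbackTopResult = {
  onsuccess: function (results) {
    // Assumption: results[0] is the most likely transcription.
    speechText = results.length > 0 ? results[0] : "";
  },
  onerror: function (error) {
    console.log('[SpeechToText] error: ' + error.message);
    da.stopSegment();
  }
};
// Pass it to the recognizer instead of callbackobject:
// speechToText.startSpeechToText(callbackTopResult);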