Skip to content

Instantly share code, notes, and snippets.

@cbattlegear
Created October 16, 2019 14:56
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save cbattlegear/98899e58e6c0bb14d84c0f215a67cdff to your computer and use it in GitHub Desktop.
Save cbattlegear/98899e58e6c0bb14d84c0f215a67cdff to your computer and use it in GitHub Desktop.
using System;
using System.IO;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.WebJobs;
using Microsoft.Azure.WebJobs.Extensions.Http;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json;
using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;
using Microsoft.CognitiveServices.Speech.Intent;
using System.Diagnostics;
using System.Net;
namespace MistySpeechRecognition
{
/// <summary>
/// Azure Function that accepts a JSON body containing a base64-encoded WAV clip
/// ("SoundData"), runs it through the Cognitive Services / LUIS intent
/// recognizer, and streams back a synthesized-speech WAV answering the
/// detected intent.
/// </summary>
public static class IntentDetection
{
    /// <summary>
    /// HTTP entry point. Writes the uploaded audio to a temp WAV, performs
    /// intent recognition on it, synthesizes a spoken response for the
    /// recognized intent, and returns that response as "audio/x-wav".
    /// </summary>
    /// <param name="req">Request whose body is JSON: { "SoundData": "&lt;base64 wav&gt;" }.</param>
    /// <param name="log">Function logger; also used for timing diagnostics.</param>
    /// <returns>A FileStreamResult streaming the synthesized reply WAV.</returns>
    [FunctionName("IntentDetection")]
    public static async Task<IActionResult> Run(
        [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequest req,
        ILogger log)
    {
        log.LogInformation("C# HTTP trigger function processed a request.");
        Stopwatch timer = new Stopwatch();
        timer.Start();

        // LUIS requires its own "endpoint key" (from https://www.luis.ai/),
        // distinct from other Cognitive Services keys. Replace key/region below.
        var config = SpeechConfig.FromSubscription("YourLUISSubKey", "eastus2");

        // The Azure Functions sandbox only allows writes under D:\local\Temp;
        // fall back to C:\Temp when debugging locally.
        string filename = "D:\\local\\Temp\\" + Guid.NewGuid().ToString() + ".wav";
#if DEBUG
        filename = "C:\\Temp\\" + Guid.NewGuid().ToString() + ".wav";
#endif
        string requestBody = await new StreamReader(req.Body).ReadToEndAsync();
        dynamic data = JsonConvert.DeserializeObject(requestBody);
        string SoundData = data?.SoundData;
        string outfile = "";
        Byte[] soundbytes = Convert.FromBase64String(SoundData);
        File.WriteAllBytes(filename, soundbytes);
        log.LogInformation(timer.Elapsed.ToString());

        using (var audioInput = AudioConfig.FromWavFileInput(filename))
        {
            using (var recognizer = new IntentRecognizer(config, audioInput))
            {
                // Completed by the SessionStopped or Canceled handler so we
                // know when the (continuous) recognition of the file is done.
                var stopRecognition = new TaskCompletionSource<int>();

                // Attach the LUIS model and listen for every intent it defines.
                var model = LanguageUnderstandingModel.FromAppId("YourLUISAppID");
                recognizer.AddAllIntents(model);

                recognizer.Recognized += (s, e) => {
                    if (e.Result.Reason == ResultReason.RecognizedIntent)
                    {
                        log.LogInformation(timer.Elapsed.ToString());
                        log.LogInformation($"RECOGNIZED: Text={e.Result.Text}");
                        log.LogInformation($" Intent Id: {e.Result.IntentId}.");
                        log.LogInformation($" Language Understanding JSON: {e.Result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)}.");
                        // Map the intent to a spoken reply; `outfile` is the
                        // synthesized WAV path that Run() streams back.
                        switch (e.Result.IntentId)
                        {
                            case "None":
                                outfile = SynthesisToWaveFileAsync("Sorry, I missed that.", log);
                                break;
                            case "HowAreYou":
                                outfile = SynthesisToWaveFileAsync("Doing super!", log);
                                break;
                            case "Weather.CheckWeatherValue":
                                outfile = SynthesisToWaveFileAsync(GetWeatherScript(), log);
                                break;
                        }
                    }
                    else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    {
                        log.LogInformation($"RECOGNIZED: Text={e.Result.Text}");
                        log.LogInformation($" Intent not recognized.");
                    }
                    else if (e.Result.Reason == ResultReason.NoMatch)
                    {
                        log.LogInformation($"NOMATCH: Speech could not be recognized.");
                    }
                };

                recognizer.Canceled += (s, e) => {
                    log.LogInformation($"CANCELED: Reason={e.Reason}");
                    if (e.Reason == CancellationReason.Error)
                    {
                        log.LogInformation($"CANCELED: ErrorCode={e.ErrorCode}");
                        log.LogInformation($"CANCELED: ErrorDetails={e.ErrorDetails}");
                        log.LogInformation($"CANCELED: Did you update the subscription info?");
                    }
                    stopRecognition.TrySetResult(0);
                };

                recognizer.SessionStarted += (s, e) => {
                    log.LogInformation("\n Session started event.");
                };

                recognizer.SessionStopped += (s, e) => {
                    log.LogInformation("\n Session stopped event.");
                    log.LogInformation("\nStop recognition.");
                    stopRecognition.TrySetResult(0);
                };

                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
                log.LogInformation(timer.Elapsed.ToString());

                // FIX: await the completion instead of Task.WaitAny — blocking
                // a thread-pool thread inside an async function risks thread
                // starvation under load for no benefit.
                await stopRecognition.Task.ConfigureAwait(false);
                log.LogInformation(timer.Elapsed.ToString());

                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }

        // FIX: the uploaded temp WAV was never deleted and leaked one file per
        // request. Best-effort cleanup; failure to delete must not fail the call.
        try
        {
            File.Delete(filename);
        }
        catch (IOException ex)
        {
            log.LogWarning($"Could not delete temp file {filename}: {ex.Message}");
        }

        timer.Stop();
        log.LogInformation(timer.Elapsed.ToString());

        // No intent handler produced audio (e.g. plain speech / no match):
        // fall back to a generic apology so the caller always gets a WAV back.
        if (outfile == "")
        {
            outfile = SynthesisToWaveFileAsync("Sorry, I missed that.", log);
        }
        log.LogInformation(timer.Elapsed.ToString());
        return (ActionResult)new FileStreamResult(File.OpenRead(outfile), "audio/x-wav");
    }

    /// <summary>
    /// Synthesizes <paramref name="whattosay"/> to a uniquely-named WAV file
    /// and returns the file path. NOTE: despite the Async suffix this method
    /// is synchronous — it blocks on SpeakTextAsync(...).Result. Kept as-is
    /// because callers depend on the string-returning signature.
    /// </summary>
    /// <param name="whattosay">Plain text to speak.</param>
    /// <param name="log">Logger for success/cancellation diagnostics.</param>
    /// <returns>Path of the WAV file written (even if synthesis was canceled).</returns>
    public static string SynthesisToWaveFileAsync(string whattosay, ILogger log)
    {
        // Speech (TTS) key/region — a different subscription than the LUIS one
        // used in Run(). Default language is en-US.
        var config = SpeechConfig.FromSubscription("YourSpeechKeyHere", "eastus2");
        config.SpeechSynthesisVoiceName = "Microsoft Server Speech Text to Speech Voice (en-US, JessaRUS)";

        // Same sandbox-writable temp locations as Run().
        string fileName = "D:\\local\\Temp\\" + Guid.NewGuid().ToString() + "output.wav";
#if DEBUG
        fileName = "C:\\Temp\\" + Guid.NewGuid().ToString() + "output.wav";
#endif
        using (var fileOutput = AudioConfig.FromWavFileOutput(fileName))
        using (var synthesizer = new SpeechSynthesizer(config, fileOutput))
        {
            string text = whattosay;
            using (var result = synthesizer.SpeakTextAsync(text).Result)
            {
                if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    log.LogInformation($"Speech synthesized for text [{text}], and the audio was saved to [{fileName}]");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                    log.LogInformation($"CANCELED: Reason={cancellation.Reason}");
                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        log.LogInformation($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        log.LogInformation($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
                        log.LogInformation($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
        return fileName;
    }

    /// <summary>
    /// Fetches current conditions for Fargo, ND from weatherstack and builds a
    /// short spoken weather summary, appending a wind remark for the highest
    /// matching wind-speed bracket (5/10/15/25 mph — later ifs overwrite earlier).
    /// </summary>
    /// <returns>A sentence like "It is 40 degrees outside and is Cloudy. ...".</returns>
    public static string GetWeatherScript()
    {
        string response;
        // FIX: WebClient is IDisposable and was never disposed.
        using (var client = new WebClient())
        {
            response = client.DownloadString("http://api.weatherstack.com/current?access_key=YourKeyHere&query=Fargo,%20ND&units=f");
        }
        dynamic data = JsonConvert.DeserializeObject(response);

        string windstatement = "";
        // FIX: the 10/15/25 brackets read data.wind_speed, but weatherstack
        // nests the value under "current" — so anything above the 5 mph remark
        // could never trigger. All brackets now read data.current.wind_speed.
        if (data.current.wind_speed >= 5)
        {
            windstatement = "There's also a slight breeze.";
        }
        if (data.current.wind_speed >= 10)
        {
            windstatement = "It's also breezy out.";
        }
        if (data.current.wind_speed >= 15)
        {
            windstatement = "It's also windy out there.";
        }
        if (data.current.wind_speed >= 25)
        {
            windstatement = "You also better hold onto your hat, she's blowing out there!";
        }
        return "It is " + data.current.temperature + " degrees outside and is " + data.current.weather_descriptions[0] + ". " + windstatement;
    }
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment