Skip to content

Instantly share code, notes, and snippets.

@dhaniksahni
Last active March 3, 2020 18:42
Show Gist options
  • Save dhaniksahni/615d646abe6ef87ffa3747959cb8d93c to your computer and use it in GitHub Desktop.
Save dhaniksahni/615d646abe6ef87ffa3747959cb8d93c to your computer and use it in GitHub Desktop.
Google Speech-to-Text transcription in Salesforce Apex
/**
 * Request payload for the Google Speech-to-Text `speech:recognize` endpoint.
 * Field names deliberately mirror the API's JSON keys, so they must not be renamed.
 */
public class AudioData {

    /** Wrapper for the base64-encoded audio bytes. */
    public class Audio {
        public String content;
    }

    /** Recognition settings sent alongside the audio. */
    public class Config {
        public String encoding;
        public Integer sampleRateHertz;
        public String languageCode;
        public Boolean enableWordTimeOffsets;
    }

    public Config config;
    public Audio audio;

    /** Deserializes a JSON document into an AudioData instance. */
    public static AudioData parse(String json) {
        return (AudioData) System.JSON.deserialize(json, AudioData.class);
    }
}
/**
 * Response payload from the Google Speech-to-Text `speech:recognize` endpoint.
 * Field names deliberately mirror the API's JSON keys, so they must not be renamed.
 */
public class AudioResponseData {

    /** One recognized segment of the audio, with ranked alternatives. */
    public class SpeechRecognitionResult {
        public List<SpeechRecognitionAlternative> alternatives;
        public String channelTag;
        public String languageCode;
    }

    /** A candidate transcript for a segment. */
    public class SpeechRecognitionAlternative {
        public String transcript;
        public String confidence;
        public List<WordInfo> words;
    }

    /** Per-word timing and speaker detail (populated when word offsets are enabled). */
    public class WordInfo {
        public String startTime;
        public String endTime;
        public String word;
        public String confidence;
        public String speakerTag;
    }

    public List<SpeechRecognitionResult> results;

    /** Deserializes a JSON document into an AudioResponseData instance. */
    public static AudioResponseData parse(String json) {
        return (AudioResponseData) System.JSON.deserialize(json, AudioResponseData.class);
    }
}
public class GoogleSpeechService {

    /**
     * Reads the Google OAuth access token from custom metadata.
     * NOTE(review): a statically stored access token expires quickly; consider
     * a Named Credential or refresh-token flow instead — confirm with the owner.
     */
    public static String getAccessToken() {
        GoogleAuthSetting__mdt mapping =
            [SELECT AccessToken__c, Label
             FROM GoogleAuthSetting__mdt
             WHERE Label = 'AccessToken' AND DeveloperName = 'AccessToken'];
        return mapping.AccessToken__c;
    }

    /**
     * Transcribes the latest audio file attached to the given record using the
     * Google Speech-to-Text v1p1beta1 API and saves the transcript as a
     * published CaseComment on that record.
     *
     * @param recordid Id of the parent record (a Case, since a CaseComment is
     *                 inserted with it as ParentId) the audio file is linked to.
     * @return the Id of the inserted CaseComment.
     * @throws AuraHandledException when no file is attached or the API call fails.
     */
    @AuraEnabled
    public static String GetTranscript(String recordid) {
        Blob fileBody = loadLatestFileBody(recordid);
        if (fileBody == null) {
            // Fail fast with a clear message instead of letting
            // EncodingUtil.base64Encode(null) throw a null-pointer exception.
            throw new AuraHandledException('No file found for record ' + recordid);
        }

        HttpRequest req = new HttpRequest();
        req.setHeader('Authorization', 'Bearer ' + getAccessToken());
        req.setHeader('Content-Type', 'application/json; charset=utf-8');
        req.setMethod('POST');
        req.setEndpoint('https://speech.googleapis.com/v1p1beta1/speech:recognize');
        req.setTimeout(120000);

        AudioData data = new AudioData();
        AudioData.Audio audio = new AudioData.Audio();
        audio.content = EncodingUtil.base64Encode(fileBody);
        data.audio = audio;
        AudioData.Config config = new AudioData.Config();
        config.encoding = 'MP3';            // MP3 decoding requires the v1p1beta1 endpoint
        config.sampleRateHertz = 16000;
        config.languageCode = 'en-US';
        config.enableWordTimeOffsets = false;
        data.config = config;

        String jsondta = System.JSON.serialize(data);
        System.debug('jsondta:' + jsondta);
        req.setBody(jsondta);

        Http http = new Http();
        HTTPResponse res = http.send(req);
        System.debug('res:' + res.getBody());
        if (res.getStatusCode() != 200) {
            // Surface API failures instead of parsing an error payload as a result.
            throw new AuraHandledException(
                'Speech API error ' + res.getStatusCode() + ': ' + res.getBody());
        }

        AudioResponseData responseData = AudioResponseData.parse(res.getBody());
        String content = buildTranscript(responseData);

        CaseComment newCommmand = new CaseComment();
        newCommmand.CommentBody = content;
        newCommmand.IsPublished = true;
        newCommmand.ParentId = recordid;
        insert newCommmand;
        System.debug('res:' + newCommmand.Id);
        return newCommmand.Id;
    }

    // Returns the VersionData blob of the first latest-version file linked to
    // the record, or null when no file is attached.
    private static Blob loadLatestFileBody(Id recordid) {
        Set<Id> ids = new Set<Id>();
        for (ContentDocumentLink link : [SELECT ContentDocumentId, LinkedEntityId
                                         FROM ContentDocumentLink
                                         WHERE LinkedEntityId = :recordid]) {
            ids.add(link.ContentDocumentId);
        }
        if (ids.isEmpty()) {
            return null;
        }
        List<ContentVersion> versions = [SELECT VersionData, Title, ContentDocumentId, FileExtension
                                         FROM ContentVersion
                                         WHERE ContentDocumentId IN :ids AND IsLatest = true];
        return versions.isEmpty() ? null : versions[0].VersionData;
    }

    // Concatenates every alternative transcript, one per line. Null-safe:
    // the API omits `results` entirely when nothing was recognized, which
    // previously caused a null-pointer exception in the loop.
    private static String buildTranscript(AudioResponseData responseData) {
        String content = '';
        if (responseData == null || responseData.results == null) {
            return content;
        }
        for (AudioResponseData.SpeechRecognitionResult recognitionResult : responseData.results) {
            if (recognitionResult.alternatives == null) {
                continue;
            }
            for (AudioResponseData.SpeechRecognitionAlternative alternative : recognitionResult.alternatives) {
                content = content + '\n' + alternative.transcript;
            }
        }
        return content;
    }
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment