// Paste your docomo aiTalk API key here.
const API_KEY = 'PASTE_YOUR_API_KEY_HERE';
const url = `https://api.apigw.smt.docomo.ne.jp/aiTalk/v1/textToSpeech?APIKEY=${API_KEY}`;

// Read incoming chat messages from the widget's event stream and speak them.
document.addEventListener('onEventReceived', async function (obj) {
    if (obj.detail.command === 'PRIVMSG') {
        play(obj.detail.body, 'maki');
    }
});

// Request synthesized speech for `text` with the voice `who`, then play it.
async function play(text, who) {
    const result = await fetch(new URL(url), {
        method: 'POST',
        body: `<?xml version="1.0" encoding="utf-8" ?>
<speak version="1.1"><voice name="${who}">${text}</voice></speak>`,
        headers: new Headers({
            'Content-Type': 'application/ssml+xml;charset=UTF-8',
            'Accept': 'audio/L16',
        })
    });

    // The Accept header asks for audio/L16 (raw 16-bit big-endian PCM);
    // the response body is decoded here as mono samples at 16 kHz.
    const audioRawBuffer = await result.arrayBuffer();
    const audioDataView = new DataView(audioRawBuffer);

    const audioContext = new AudioContext();
    const audioBuffer = audioContext.createBuffer(1, audioDataView.byteLength / 2, 16000);
    const channel = audioBuffer.getChannelData(0);
    for (let i = 0; i < audioDataView.byteLength; i += 2) {
        const value = audioDataView.getInt16(i, false); // big-endian sample
        channel[i / 2] = value / 32768.0;               // normalize to [-1, 1)
    }

    // Play the decoded audio through the default output.
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(audioContext.destination);
    source.start();
}
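
The listener assumes the hosting overlay or widget platform dispatches 'onEventReceived' CustomEvents whose detail carries a command field ('PRIVMSG' for chat messages) and a body field with the message text; the exact event shape is not documented in the gist itself. As a minimal sketch under that assumption, you can exercise the handler from the browser console by dispatching a synthetic event of the same shape:

// Hypothetical smoke test: fire a synthetic event with the shape the
// listener above expects ({ detail: { command, body } }). In production
// the widget platform emits these events itself.
document.dispatchEvent(new CustomEvent('onEventReceived', {
    detail: {
        command: 'PRIVMSG',
        body: 'Hello, this is a test.',
    },
}));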