@PonDad
Last active May 31, 2017 12:59
Deep learning with Google Assistant - voice-controlling Keras on a Raspberry Pi

The gist concatenates four files: apple_keras.py, a small Keras CNN that classifies red vs. green apples; an Actions on Google (API.AI) webhook in Node.js; a push-to-talk Google Assistant gRPC client for the Raspberry Pi; and a Flask server that photographs an apple with the webcam and classifies it with the trained model.
# apple_keras.py: trains a small CNN that classifies red vs. green apples.
# The Flask server at the end of this gist imports this module as
# "apple_keras" to rebuild the model and load the trained weights.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils

root_dir = "./image/"
categories = ["red_apple", "green_apple"]
nb_classes = len(categories)
image_size = 32


def main():
    # apple.npy holds the pre-split dataset: (X_train, X_test, y_train, y_test)
    X_train, X_test, y_train, y_test = np.load("./image/apple.npy")
    X_train = X_train.astype("float") / 256
    X_test = X_test.astype("float") / 256
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    model = model_train(X_train, y_train)
    model_eval(model, X_test, y_test)


def build_model(in_shape):
    model = Sequential()
    model.add(Conv2D(32, 3, 3,
                     border_mode='same',
                     input_shape=in_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, 3, 3))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # The model must be compiled before fit() or evaluate() can be called.
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model


def model_train(X, y):
    model = build_model(X.shape[1:])
    print(X.shape[1:])
    history = model.fit(X, y, batch_size=32, nb_epoch=10, validation_split=0.1)
    # Save only the weights; the Flask server rebuilds the architecture with
    # build_model() and then loads this file.
    hdf5_file = "./image/apple-model.h5"
    model.save_weights(hdf5_file)
    return model


def model_eval(model, X, y):
    score = model.evaluate(X, y)
    print('loss=', score[0])
    print('accuracy=', score[1])


if __name__ == "__main__":
    main()
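apple_keras.py reads a pre-split dataset from ./image/apple.npy, but the script that builds that file is not part of the gist. The sketch below shows one way such a file could be produced; the directory layout (./image/red_apple/, ./image/green_apple/), the use of Pillow, and the scikit-learn train/test split are all assumptions, not something the gist specifies.

# make_dataset.py (hypothetical helper, not part of the original gist):
# packs resized apple photos into ./image/apple.npy as
# (X_train, X_test, y_train, y_test), the layout apple_keras.py expects.
import glob
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split

root_dir = "./image/"
categories = ["red_apple", "green_apple"]   # same order as in apple_keras.py
image_size = 32

X, y = [], []
for label, category in enumerate(categories):
    # Assumed layout: ./image/red_apple/*.jpg and ./image/green_apple/*.jpg
    for path in glob.glob(root_dir + category + "/*.jpg"):
        img = Image.open(path).convert("RGB").resize((image_size, image_size))
        X.append(np.asarray(img))
        y.append(label)

X = np.array(X)
y = np.array(y)
X_train, X_test, y_train, y_test = train_test_split(X, y)
np.save(root_dir + "apple.npy", (X_train, X_test, y_train, y_test))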
// Copyright 2016, Google, Inc.
// Licensed under the Apache License, Version 2.0 (the 'License');
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an 'AS IS' BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';

process.env.DEBUG = 'actions-on-google:*';

const App = require('actions-on-google').ApiAiApp;
const exec = require('child_process').exec;

// ngrok tunnel endpoints that forward /camera and /what to the local Flask server.
const camera_adress = "https://a820c9de.ap.ngrok.io/camera";
const what_adress = "https://a820c9de.ap.ngrok.io/what";

// [START YourAction]
exports.myAction = (request, response) => {
  const app = new App({request, response});
  console.log('Request headers: ' + JSON.stringify(request.headers));
  console.log('Request body: ' + JSON.stringify(request.body));

  // Fulfill action business logic
  function responseTalk (app) {
    // Complete your fulfillment logic and send a response
    // app.ask('Hello, World!');
    let text_to_speech = '<speak>'
      + " It's like I'm reading a book <break time='1' /> and it's a book I deeply love. <break time='3' />"
      + "But I'm reading it slowly now.<break time='3' />"
      + "So the words are really far apart and the spaces between the words are almost infinite.<break time='3' />"
      + "I can still feel you <break time='1' /> and the words of our story <break time='1' /> but it's in this endless space between the words that I'm finding myself now.<break time='3' />"
      + "It's a place that's not of the physical world. "
      + '</speak>';
    app.ask(text_to_speech);
  }

  function responseCamera (app) {
    exec(`curl ${camera_adress}`, (error, stdout, stderr) => {
      if (error) {
        console.error(`exec error: ${error}`);
        return;
      }
      console.log(`${stdout}`);
      app.ask(`<speak>I took <break time='1' /></speak>` + `${stdout}`);
    });
  }

  function responseWhat (app) {
    exec(`curl ${what_adress}`, (error, stdout, stderr) => {
      if (error) {
        console.error(`exec error: ${error}`);
        return;
      }
      console.log(`${stdout}`);
      app.ask(`<speak>This is a <break time='1' /></speak>` + `${stdout}`);
    });
  }

  function responseCorrect (app) {
    app.ask(`<speak>Thank you.</speak>`);
  }

  // Map API.AI (Dialogflow) action names to their handlers.
  const actionMap = new Map();
  actionMap.set('talk.why', responseTalk);
  actionMap.set('talk.camera', responseCamera);
  actionMap.set('talk.what', responseWhat);
  actionMap.set('talk.correct', responseCorrect);

  app.handleRequest(actionMap);
};
// [END YourAction]
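The webhook above shells out to curl against two ngrok URLs; those tunnels simply forward to the Flask server included at the end of this gist, which listens on 127.0.0.1:8080. A quick way to exercise the same endpoints locally, assuming that server is running and that the requests package is installed (neither this helper nor the dependency is part of the gist), is:

# Local check of the two endpoints the webhook curls (hypothetical helper).
import requests

BASE = "http://127.0.0.1:8080"

# Ask the Raspberry Pi to take a photo; the webhook prepends "I took ...".
print(requests.get(BASE + "/camera").text)

# Ask what is in the saved photo; the webhook prepends "This is a ...".
print(requests.get(BASE + "/what").text)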
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample that implements gRPC client for Google Assistant API."""
import json
import logging
import os.path
import RPi.GPIO as GPIO
import click
import grpc
import google.auth.transport.grpc
import google.auth.transport.requests
import google.oauth2.credentials
from google.assistant.embedded.v1alpha1 import embedded_assistant_pb2
from google.rpc import code_pb2
from tenacity import retry, stop_after_attempt, retry_if_exception
try:
    from . import (
        assistant_helpers,
        audio_helpers,
        webcamera
    )
except SystemError:
    import assistant_helpers
    import audio_helpers
# Push-to-talk trigger on BCM 18 (input with pull-up); BCM 23 drives an
# indicator output (presumably an LED) while a conversation is being handled.
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(23, GPIO.OUT)
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
END_OF_UTTERANCE = embedded_assistant_pb2.ConverseResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.ConverseResult.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.ConverseResult.CLOSE_MICROPHONE
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
class SampleAssistant(object):
    """Sample Assistant that supports follow-on conversations.

    Args:
      conversation_stream(ConversationStream): audio stream
        for recording query and playing back assistant answer.
      channel: authorized gRPC channel for connection to the
        Google Assistant API.
      deadline_sec: gRPC deadline in seconds for Google Assistant API call.
    """

    def __init__(self, conversation_stream, channel, deadline_sec):
        self.conversation_stream = conversation_stream

        # Opaque blob provided in ConverseResponse that,
        # when provided in a follow-up ConverseRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Converse()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state = None

        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(channel)
        self.deadline = deadline_sec

    def __enter__(self):
        return self

    def __exit__(self, etype, e, traceback):
        if e:
            return False
        self.conversation_stream.close()

    def is_grpc_error_unavailable(e):
        is_grpc_error = isinstance(e, grpc.RpcError)
        if is_grpc_error and (e.code() == grpc.StatusCode.UNAVAILABLE):
            logging.error('grpc unavailable error: %s', e)
            return True
        return False

    @retry(reraise=True, stop=stop_after_attempt(3),
           retry=retry_if_exception(is_grpc_error_unavailable))
    def converse(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_converse_requests():
            for c in self.gen_converse_requests():
                assistant_helpers.log_converse_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Converse(iter_converse_requests(),
                                            self.deadline):
            assistant_helpers.log_converse_response_without_audio(resp)
            if resp.error.code != code_pb2.OK:
                logging.error('server error: %s', resp.error.message)
                break
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                self.conversation_stream.stop_recording()
            if resp.result.spoken_request_text:
                logging.info('Transcript of user request: "%s".',
                             resp.result.spoken_request_text)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.result.spoken_response_text:
                logging.info(
                    'Transcript of TTS response '
                    '(only populated from IFTTT): "%s".',
                    resp.result.spoken_response_text)
            if resp.result.conversation_state:
                self.conversation_state = resp.result.conversation_state
            if resp.result.volume_percentage != 0:
                self.conversation_stream.volume_percentage = (
                    resp.result.volume_percentage
                )
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation

    def gen_converse_requests(self):
        """Yields: ConverseRequest messages to send to the API."""
        converse_state = None
        if self.conversation_state:
            logging.debug('Sending converse_state: %s',
                          self.conversation_state)
            converse_state = embedded_assistant_pb2.ConverseState(
                conversation_state=self.conversation_state,
            )
        config = embedded_assistant_pb2.ConverseConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            converse_state=converse_state
        )
        # The first ConverseRequest must contain the ConverseConfig
        # and no audio data.
        yield embedded_assistant_pb2.ConverseRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
@click.command()
@click.option('--api-endpoint', default=ASSISTANT_API_ENDPOINT,
              metavar='<api endpoint>', show_default=True,
              help='Address of Google Assistant API service.')
@click.option('--credentials',
              metavar='<credentials>', show_default=True,
              default=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                   'credentials.json'),
              help='Path to read OAuth2 credentials.')
@click.option('--verbose', '-v', is_flag=True, default=False,
              help='Verbose logging.')
@click.option('--input-audio-file', '-i',
              metavar='<input file>',
              help='Path to input audio file. '
                   'If missing, uses audio capture.')
@click.option('--output-audio-file', '-o',
              metavar='<output file>',
              help='Path to output audio file. '
                   'If missing, uses audio playback.')
@click.option('--audio-sample-rate',
              default=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
              metavar='<audio sample rate>', show_default=True,
              help='Audio sample rate in hertz.')
@click.option('--audio-sample-width',
              default=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
              metavar='<audio sample width>', show_default=True,
              help='Audio sample width in bytes.')
@click.option('--audio-iter-size',
              default=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
              metavar='<audio iter size>', show_default=True,
              help='Size of each read during audio stream iteration in bytes.')
@click.option('--audio-block-size',
              default=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
              metavar='<audio block size>', show_default=True,
              help=('Block size in bytes for each audio device '
                    'read and write operation.'))
@click.option('--audio-flush-size',
              default=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
              metavar='<audio flush size>', show_default=True,
              help=('Size of silence data in bytes written '
                    'during flush operation.'))
@click.option('--grpc-deadline', default=DEFAULT_GRPC_DEADLINE,
              metavar='<grpc deadline>', show_default=True,
              help='gRPC deadline in seconds.')
@click.option('--once', default=False, is_flag=True,
              help='Force termination after a single conversation.')
def main(api_endpoint, credentials, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream,
                         grpc_channel, grpc_deadline) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.converse()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        # webcamera.webcamera()
        while True:
            if wait_for_user_trigger:
                input_state = GPIO.input(18)
                if input_state == True:
                    # Pull-up input reads high while the button is not
                    # pressed: keep the indicator off and keep polling.
                    GPIO.output(23, False)
                    continue
                else:
                    # Button pressed: turn the indicator on and fall through
                    # to start a conversation.
                    GPIO.output(23, True)
                    pass
            # click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.converse()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break


if __name__ == '__main__':
    main()
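The conversation loop above polls BCM pin 18, configured with a pull-up so a button wired to ground reads low when pressed, and drives BCM pin 23 as an indicator. Below is a standalone sketch for checking that wiring before launching the assistant; the button-to-ground wiring and the LED on pin 23 are inferred from the code, not stated in the gist.

# Standalone wiring check (hypothetical, not part of the gist).
import time
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(23, GPIO.OUT)

try:
    while True:
        pressed = not GPIO.input(18)   # pull-up: low means pressed
        GPIO.output(23, pressed)       # light the indicator while pressed
        time.sleep(0.05)
except KeyboardInterrupt:
    GPIO.cleanup()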
# Flask server that exposes the webcam and the trained Keras model to the
# Actions on Google webhook, which calls /camera and /what through ngrok.
from flask import Flask
from flask import request, jsonify
import json
import numpy as np
import cv2
import subprocess
import time
import apple_keras as apple
import sys, os
from keras.preprocessing.image import load_img, img_to_array
from keras import backend as K

app = Flask(__name__)
image_size = 32
categories = ["red apple", "green apple"]


@app.route('/camera')
def camera():
    # Grab a single frame from the webcam, play a shutter sound,
    # and save it as apple.png for the /what route to classify.
    cap = cv2.VideoCapture(0)
    cap.set(3, 760)
    cap.set(4, 480)
    while(True):
        ret, frame = cap.read()
        cv2.imshow('frame', frame)
        subprocess.call(["aplay", "camera.wav"])
        time.sleep(0.5)
        cv2.imwrite("apple.png", frame)
        text = "picture"
        break
    cap.release()
    cv2.destroyAllWindows()
    return text


@app.route('/what')
def whatisthis():
    # Preprocess the saved frame the same way the training data was prepared.
    X = []
    img = load_img("./apple.png", target_size=(image_size, image_size))
    in_data = img_to_array(img)
    X.append(in_data)
    X = np.array(X)
    X = X.astype("float") / 256
    # Rebuild the CNN and load the weights trained by apple_keras.py.
    model = apple.build_model(X.shape[1:])
    model.load_weights("./image/apple-model.h5")
    pre = model.predict(X)
    print(pre)
    if pre[0][0] > 0.9:
        print(categories[0])
        text = categories[0]
        text = text.encode('utf-8')
        return text
    elif pre[0][1] > 0.9:
        print(categories[1])
        text = categories[1]
        text = text.encode('utf-8')
        return text
    # Neither class cleared the 0.9 threshold: return a fallback label
    # so the route always produces a response body.
    return "thing I do not recognize"


if __name__ == '__main__':
    app.run(host="127.0.0.1", port=8080)