Go to an egghead.io course page (e.g. "Building a React.js App") and run the following snippet in the browser's developer console:
// Log the URL of every lesson link (h4 > a) on the current course page.
$('h4 a').each(function (idx, anchor) {
  console.log(anchor.href);
});
import os | |
from tqdm import tqdm | |
import cv2 | |
import numpy as np | |
from pprint import pprint | |
from os.path import join | |
import glob | |
from sklearn.utils import shuffle | |
from keras.utils import to_categorical |
>>> R[0]
array([26801.16945753, 38668.6046967 , 34937.9753498 , 45478.98967837])
>>> R[1]
array([39823.04549356, 45055.11438545, 44048.634533  , 50008.11226016])
>>> list(zip(R[0], R[1]))
[(26801.16945753, 39823.04549356), (38668.6046967, 45055.11438545), (34937.9753498, 44048.634533), (45478.98967837, 50008.11226016)]
#include "LineFollower.h" | |
// Static configuration for the line-follower robot.
// Sensor logic is non-inverted by default.
bool pdlf::Robot::inverted_logic = false;
// Default sensor threshold for line detection.
size_t pdlf::Robot::THRESHOLD = 200;
//Setting global speed |
Go to an egghead.io course page (e.g. "Building a React.js App") and run the following snippet in the browser's developer console:
// Print the href of each lesson anchor (h4 > a) found on the page.
$('h4 a').each(function (idx, anchor) {
  console.log(anchor.href);
});
# NOTE(review): this paste appears truncated — `inference` is created but never
# populated or returned; the rest of the function is not visible in this chunk.
# NOTE(review): `model=model` captures a module-level `model` at definition
# time — confirm it is defined before this `def` executes.
def infer(input_data, model=model): | |
inference = [] | |
# Label vocabulary for the multi-label scene classifier.
classes = np.array(['desert', 'mountain', 'sea', 'sunset', 'trees']) | |
y_pred = model.predict(input_data) | |
# Performing masking | |
# Threshold the sigmoid outputs at 0.5 into a binary 0.0/1.0 mask per class.
y_pred = (y_pred > 0.5) * 1.0 | |
from keras.models import Sequential | |
from keras.layers import Dense, Dropout, Flatten | |
from keras.layers import Conv2D, MaxPooling2D, Activation | |
# Training hyper-parameters.
batch_size = 100   # samples per gradient update
num_classes = 5    # number of target categories
epochs = 50        # full passes over the training data

# Input image dimensions (pixels).
img_rows = 100
img_cols = 100
import keras.backend as K | |
def multitask_loss(y_true, y_pred):
    """Per-task binary cross-entropy, summed over tasks and averaged over the batch.

    Each output unit is treated as an independent binary classification task.
    """
    # Clip predictions away from 0 and 1 so the logs below stay finite.
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1 - eps)
    per_task = -(y_true * K.log(y_pred) + (1 - y_true) * K.log(1 - y_pred))
    # Sum the task losses per sample, then average across the batch.
    return K.mean(K.sum(per_task, axis=1))
import matplotlib.pyplot as plt | |
import h5py | |
from sklearn.model_selection import train_test_split | |
def load():
    """Load the dataset from ./data/dataset.h5.

    Returns:
        tuple: ``(x, y)`` arrays read from the ``x`` and ``y`` datasets.
        The original body read both arrays and then discarded them — it
        never returned anything, which made the function useless to callers.
    """
    # Explicit read-only mode; context manager closes the file even on error.
    with h5py.File("./data/dataset.h5", "r") as f:
        # ds[()] reads the whole dataset; `.value` was removed in h5py >= 3.0.
        x = f['x'][()]
        y = f['y'][()]
    return x, y
# NOTE(review): SyntaxError — the non-default parameter `code_size` follows the
# defaulted `img_shape`. Reorder to `(code_size, img_shape=(44, 44, 3))` or give
# `code_size` a default. The function body also continues beyond this excerpt
# (encoder unfinished, no decoder/return visible), so only the visible part is
# documented here.
def build_deep_conv_autoencoder(img_shape=(44, 44, 3), code_size): | |
H,W,C = img_shape | |
# encoder
# Two conv(3x3, 'elu') + 2x2 max-pool stages: 32 then 64 filters, halving the
# spatial resolution at each pool.
encoder = keras.models.Sequential() | |
encoder.add(L.InputLayer(img_shape)) | |
encoder.add(L.Conv2D(32, kernel_size=(3, 3),strides=1, padding='same', activation='elu')) | |
encoder.add(L.MaxPool2D(pool_size=(2, 2))) | |
encoder.add(L.Conv2D(64, kernel_size=(3, 3),strides=1, padding='same', activation='elu')) | |
encoder.add(L.MaxPool2D(pool_size=(2, 2))) |