Alejandro Escontrela (Alescontrela)

def initialize(board, n):
    # Reset the solver's bookkeeping: queen position per row, plus occupancy
    # counters for each row, column, and diagonal of the n x n board.
    for key in ['queen', 'row', 'col', 'nwtose', 'swtone']:
        board[key] = {}
    for i in range(n):
        board['queen'][i] = -1   # no queen placed in row i yet
        board['row'][i] = 0
        board['col'][i] = 0
    for i in range(-(n-1), n):
        board['nwtose'][i] = 0   # NW-to-SE diagonals, indexed by (col - row)
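The preview cuts off before the 'swtone' diagonals are initialized and before any placement logic. As a hedged sketch of how this bookkeeping is typically used in an n-queens solver (the helpers free and addqueen below are my own illustration, not part of the gist):

def free(board, i, j):
    # Illustrative helper (not from the gist): square (i, j) is safe when its
    # row, column, and both diagonals hold no queens.
    return (board['row'][i] == 0 and board['col'][j] == 0 and
            board['nwtose'][j - i] == 0 and board['swtone'][j + i] == 0)

def addqueen(board, i, j):
    # Illustrative helper (not from the gist): place a queen at (i, j) and bump
    # every counter the square belongs to.
    board['queen'][i] = j
    board['row'][i] += 1
    board['col'][j] += 1
    board['nwtose'][j - i] += 1
    board['swtone'][j + i] += 1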
@Alescontrela
Alescontrela / search.py
Created May 24, 2018 15:40
Search for a range
class Solution:
    def searchRange(self, nums, target):
        # Handle the trivial empty and single-element cases up front.
        if len(nums) == 0:
            return [-1, -1]
        if len(nums) == 1:
            if nums[0] != target:
                return [-1, -1]
            return [0, 0]
"""
@Alescontrela
Alescontrela / pg-pong.py
Created June 1, 2018 01:56 — forked from karpathy/pg-pong.py
Training a Neural Network ATARI Pong agent with Policy Gradients from raw pixels
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import cPickle as pickle
import gym
# hyperparameters
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
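For context, gamma above discounts rewards over an episode; the full gist implements this with a helper along roughly the following lines (paraphrased rather than quoted verbatim):

def discount_rewards(r):
    """ take a 1D float array of rewards and compute the discounted return """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(len(r))):
        if r[t] != 0:
            running_add = 0  # reset the sum at a game boundary (Pong-specific)
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r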
@Alescontrela
Alescontrela / cnn.py
Last active April 23, 2021 00:20
A multi-layer convolutional neural network created from scratch with NumPy
'''
Description: A multi-layer convolutional neural network created from scratch with NumPy
Author: Alejandro Escontrela
Version: 1.1
License: MIT
'''
import numpy as np
import matplotlib.pyplot as plt
import pickle
def convolution(image, filt, bias, s=1):
    '''
    Convolves `filt` over `image` using stride `s`
    '''
    (n_f, n_c_f, f, _) = filt.shape      # filter dimensions
    n_c, in_dim, _ = image.shape         # image dimensions
    out_dim = int((in_dim - f)/s) + 1    # calculate output dimensions
    # ensure that the filter dimensions match the dimensions of the input image
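    # --- The gist preview truncates here. The loop below is a hedged
    # --- reconstruction of the rest of the function, inferred from the shapes
    # --- above; it is not the gist's verbatim code.
    assert n_c == n_c_f, "filter channel count must match image channels"
    out = np.zeros((n_f, out_dim, out_dim))
    for curr_f in range(n_f):               # one output feature map per filter
        curr_y = out_y = 0
        while curr_y + f <= in_dim:         # slide the window down the image
            curr_x = out_x = 0
            while curr_x + f <= in_dim:     # slide the window across the image
                window = image[:, curr_y:curr_y + f, curr_x:curr_x + f]
                out[curr_f, out_y, out_x] = np.sum(filt[curr_f] * window) + bias[curr_f]
                curr_x += s
                out_x += 1
            curr_y += s
            out_y += 1
    return out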
def maxpool(image, f=2, s=2):
    '''
    Downsample input `image` using a kernel size of `f` and a stride of `s`
    '''
    n_c, h_prev, w_prev = image.shape
    # calculate output dimensions after the maxpooling operation.
    h = int((h_prev - f)/s) + 1
    w = int((w_prev - f)/s) + 1
# (later, in the forward pass) flatten the pooled feature maps into a column vector
(nf2, dim2, _) = pooled.shape
fc = pooled.reshape((nf2 * dim2 * dim2, 1))  # flatten pooled layer
def softmax(raw_preds):
    '''
    pass raw predictions through softmax activation function
    '''
    out = np.exp(raw_preds)   # exponentiate vector of raw predictions
    return out / np.sum(out)  # normalize so all values in the output sum to 1
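# Note (my addition, not part of the gist): np.exp can overflow for large raw
# predictions. A numerically stable variant subtracts the max first, which does
# not change the softmax output:
def softmax_stable(raw_preds):
    shifted = raw_preds - np.max(raw_preds)  # shift logits so the largest is 0
    out = np.exp(shifted)
    return out / np.sum(out)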
def categoricalCrossEntropy(probs, label):
    '''
    calculate the categorical cross-entropy loss of the predictions
    '''
    return -np.sum(label * np.log(probs))  # multiply the one-hot label by the log of the predictions, then sum
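# Quick usage check of the two functions above (illustrative values, my addition):
example_probs = softmax(np.array([[2.0], [1.0], [0.1]]))   # column of class scores
example_label = np.array([[1.0], [0.0], [0.0]])            # one-hot target: class 0
example_loss = categoricalCrossEntropy(example_probs, example_label)  # ~= 0.417 here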
# (later in the forward pass) dense layers applied to the flattened features
z = w3.dot(fc) + b3    # first dense layer
z[z <= 0] = 0          # pass through ReLU non-linearity
out = w4.dot(z) + b4   # second dense layer
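Putting the excerpts together, the forward pass plausibly finishes by turning the second dense layer's output into class probabilities and scoring them against the one-hot target (a hedged sketch of the wiring, not verbatim gist code; label stands for the one-hot target vector):

probs = softmax(out)                          # predicted class probabilities
loss = categoricalCrossEntropy(probs, label)  # training loss for this example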