Skip to content

Instantly share code, notes, and snippets.

View mayonesa's full-sized avatar
🇺🇦

John Jimenez mayonesa

🇺🇦
  • Florida, USA
View GitHub Profile
@mayonesa
mayonesa / hashTable.groovy
Last active December 30, 2015 15:59
Home-grown hash table
class HashTable {
private int capacity
private LinkedList<Node>[] buckets
HashTable() {
this(10)
}
HashTable(capacity) {
@mayonesa
mayonesa / MaxWeightTrianglePath.groovy
Created January 24, 2014 22:00
Gets the triangle path (from top to bottom) with the largest weight
class MaxTriangleWeight {
static def main(args) {
println(new MaxTriangleWeight().getTriangle(args[0]).maxWeight)
}
private def getTriangle(fileName) {
def reader = new BufferedReader(new FileReader(fileName))
def triangle = new Triangle(reader.readLine())
def row
while ((row = reader.readLine()) != null) {
@mayonesa
mayonesa / AsyncDisjointedChunkMultiprocessing.scala
Last active March 28, 2016 16:26
no unnecessary blocking (fork/join -> new cached-thread pool)
package jj.async
import scala.concurrent.{ ExecutionContext, Future, Await }
import java.util.concurrent.Executors.newCachedThreadPool
import rx.lang.scala.Observable
import jj.async.Helpers._
import scala.concurrent.duration._
import jj.async.Enriched._
/* Problem: How to optimize multi-process records asynchronously in chunks.
@mayonesa
mayonesa / AsyncChunkProcessor.scala
Last active March 28, 2016 16:29
fork/join -> new cached thread pool. removed commented code
package jj.async
import scala.concurrent.{ ExecutionContext, Future }
import java.util.concurrent.Executors.newCachedThreadPool
import rx.lang.scala.Observable
import ExecutionContext.Implicits.global
import jj.async.helpers._
/* Problem: How to multi-process records asynchronously in chunks.
Processing steps:
import scala.util.Random.nextInt
/**
Simulate throwing two six-sided dice 200 times and create a histogram of the results like this:
2: XXXX
3: XXXXXXXXXX
4: XXXXXXXXXXXXXXXXXXXXXX
5: XXXXXXXXXXXXXXXXXXXXX
6: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
def median(data):
    """Return the median of an iterable of comparable values.

    For even-length input this returns the UPPER of the two middle
    values (no averaging), matching the original behavior.
    Raises IndexError on empty input.
    """
    # // keeps the index an int under both Python 2 and 3; the original
    # `len(data) / 2` yields a float index on Python 3 -> TypeError.
    return sorted(data)[len(data) // 2]
def mode(data):
    # NOTE(review): this gist preview is truncated -- `loop` is never
    # called and `mode` never returns within the visible lines; the tail
    # of the function lies outside this snippet. Code left unchanged.
    # `loop` appears to scan `data` from index i down to 0, keeping
    # per-value counts; `c_max` looks like a (current-mode-value, count)
    # pair -- TODO confirm against the full gist.
    def loop(i, counts, c_max):
        max_val = c_max[0]
        if (i < 0):
            # data exhausted: return the most frequent value seen so far
            return max_val
        else:
            c_val = data[i]
            # bump the occurrence count for the current value
            new_count = counts.get(c_val, 0) + 1
            counts[c_val] = new_count
            max_dup = c_max[1]
def variance(data):
    """Population variance of a sequence, via E[x^2] - (E[x])^2.

    Raises ZeroDivisionError on empty input (as the original did).
    """
    n = len(data)
    # Single pass accumulating the sum and the sum of squares; the
    # original used a bare `reduce`, which is a NameError on Python 3.
    total = 0
    total_sq = 0
    for x in data:
        total += x
        total_sq += x * x
    # True division throughout: the original's `/` floored for int data
    # under Python 2 (no __future__ import in this snippet), silently
    # truncating the result.
    return total_sq / n - (total / n) ** 2
#Compute the likelihood of observing a sequence of die rolls
#Likelihood is the probability of getting the specific set of rolls
#in the given order
#Given a multi-sided die whose labels and probabilities are
#given by a Python dictionary called dist and a sequence (list, tuple, string)
#of rolls called data, complete the function likelihood
def likelihood(dist, data):
    """Likelihood of observing the sequence `data` of die rolls.

    `dist` maps each face label to its probability; `data` is any
    sequence (list, tuple, string) of face labels. The likelihood is
    the product of the per-roll probabilities, in the given order.
    Returns 1 for an empty sequence; raises KeyError for a roll not
    present in `dist` (same as the original).
    """
    # Explicit product loop: the original's bare `reduce` is a builtin
    # only on Python 2 and a NameError on Python 3.
    result = 1
    for roll in data:
        result *= dist[roll]
    return result
from __future__ import division
#Given the mean of an existing set of data, we receive a single
#additional observation. From the number of observations in the
#existing data, the old mean, and the new value, compute the new mean.
def mean(oldmean, n, x):
    """Incrementally update a running mean with one new observation.

    Given the mean `oldmean` of `n` existing observations and a new
    value `x`, returns the mean of all n + 1 observations.
    """
    # Reconstitute the running total, fold in the new value, renormalize.
    new_total = oldmean * n + x
    new_count = n + 1
    return new_total / new_count