scala.reflect.internal.pickling.PickleBuffer.readIndex_$eq(int)
scala.reflect.internal.pickling.UnPickler$Scan$LazyTypeRef.completeInternal(UnPickler.scala:715)
scala.reflect.internal.pickling.UnPickler$Scan$LazyTypeRef.completeInternal(UnPickler.scala:739)
scala.reflect.internal.SymbolTable.basetypeRecursions_$eq(int)
scala.reflect.internal.SymbolTable.ids_$eq(int)
scala.reflect.internal.SymbolTable.infoTransformers_$eq(scala.reflect.internal.InfoTransformers$InfoTransformer)
scala.reflect.internal.SymbolTable.phase_$eq(scala.reflect.internal.Phase)
scala.reflect.internal.SymbolTable.scala$reflect$internal$Names$$nc_$eq(int)
scala.reflect.internal.SymbolTable.scala$reflect$internal$tpe$TypeComparers$$_subsametypeRecursions_$eq(int)
scala.reflect.internal.SymbolTable.scala$reflect$internal$Types$$_basetypeRecursions_$eq(int)
Benchmark Mode Threads Samples Score Score Error (99.9%) Unit Param: a1_sizeString Param: fnName
scala.collection.DistinctBenchmark.testDistinct avgt 1 10 47.836554 0.395595 ns/op 0 distinct
scala.collection.DistinctBenchmark.testDistinct avgt 1 10 6.778683 0.007060 ns/op 0 distinctTrivial
scala.collection.DistinctBenchmark.testDistinct avgt 1 10 6.768844 0.006791 ns/op 0 distinctSimple
scala.collection.DistinctBenchmark.testDistinct avgt 1 10 6.773388 0.008900 ns/op 0 distinctSimpleJavaHashSet
scala.collection.DistinctBenchmark.testDistinct avgt 1 10 6.801826 0.155944 ns/op 0 distinctSimpleWhileJavaHashSet
scala.collection.DistinctBenchmark.testDistinct avgt 1 10 6.767716 0.005033 ns/op 0 distinctSimpleWhileIteratorOneAdd
scala.collection.DistinctBenchmark.testDistinct avgt 1 10 6.768857 0.007509 ns/op 0 distinctSimpleWhileJavaHashSetIterator
scala.collection.DistinctBenchmark.testDistinct avgt 1 10 6.770802 0.015571 ns/op 0 distinctSimpleWhil
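The fnName parameter selects which distinct implementation the benchmark measures. As an illustration only (the benchmark source itself is not shown here), an order-preserving variant backed by java.util.HashSet, in the spirit of the distinctSimpleJavaHashSet case, might look roughly like this:

import scala.collection.mutable.ListBuffer

// Illustrative sketch, not the benchmark's actual code: order-preserving distinct
// backed by java.util.HashSet (HashSet.add returns false for duplicates).
def distinctJavaHashSet[A](xs: Seq[A]): List[A] = {
  val seen = new java.util.HashSet[A]()
  val out  = ListBuffer.empty[A]
  val it   = xs.iterator
  while (it.hasNext) {
    val x = it.next()
    if (seen.add(x)) out += x
  }
  out.toList
}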
Benchmark (size) Baseline Score Baseline Error Improved Score Improved Error Change [%] Error [%]
ArrayBenchmark.concat_raw 0 255.885 13.147 52.328 3.079 489.00% 5.51%
ArrayBenchmark.concat_raw 1 299.804 16.191 290.5 14.903 103.20% 5.27%
ArrayBenchmark.concat_raw 10 488.963 26.936 482.184 27.768 101.41% 5.63%
ArrayBenchmark.concat_raw 100 2273.687 119.895 2226.962 130.443 102.10% 5.57%
ArrayBenchmark.concat_raw 1000 6262.749 329.295 13907.542 3406.33 45.03% 14.88%
ArrayBenchmark.fill_raw 0 47.861 3.081 26.835 1.527 178.35% 6.06%
ArrayBenchmark.fill_raw 1 60.451 3.459 60.335 3.349 100.19% 5.64%
ArrayBenchmark.fill_raw 10 120.94 7.277 122.238 6.824 98.94% 5.80%
ArrayBenchmark.fill_raw 100 715.567 40.233 757.701 44.284 94.44% 5.73%
Benchmark (size) Baseline Score Baseline Error Improved Score Improved Error Change
ArrayBenchmark.concat_raw 0 187.429 1.059 22.098 0.3 848.17%
ArrayBenchmark.concat_raw 1 198.556 1.238 210.646 1.031 94.26%
ArrayBenchmark.concat_raw 10 305.553 1.854 319.792 2.165 95.55%
ArrayBenchmark.concat_raw 100 636.05 4.001 649.02 3.398 98.00%
ArrayBenchmark.concat_raw 1000 8060.869 3896.583 3797.745 46.442 212.25%
ArrayBenchmark.fill_raw 0 39.921 0.321 13.453 0.274 296.74%
ArrayBenchmark.fill_raw 1 40.623 0.64 41.135 0.468 98.76%
ArrayBenchmark.fill_raw 10 77.911 0.809 76.629 2.746 101.67%
ArrayBenchmark.fill_raw 100 443.804 7.553 437.723 21.293 101.39%
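In both tables the Change column appears to be the baseline score divided by the improved score, expressed as a percentage: for concat_raw at size 0 in the second table, 187.429 / 22.098 ≈ 848.17%. Assuming the scores are average times per operation (as in the JMH output above), values above 100% indicate a speedup and values below 100% a regression. A minimal sketch of that calculation:

// Change [%] as it appears to be computed in the tables above.
def changePercent(baseline: Double, improved: Double): Double =
  baseline / improved * 100.0

// changePercent(187.429, 22.098) ≈ 848.17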
@pkukielka
pkukielka / SpellChecker.scala
Last active December 31, 2015 18:59
Make implementation parallel
class SpellChecker {
  val alphabet = ('a' to 'z') ++ " "
  def trim(word: String) = word.toLowerCase.filter(alphabet.contains(_))
  val file = io.Source.fromFile("big.txt").mkString
  val wordsCount = file.split(' ').map(trim).groupBy(identity).mapValues(_.length).par
  def distortions(w: String) = {
    def disort(t: Int, d: Int, insert: String) = w.take(t) + insert + w.drop(d)
    val transposes = for { i <- (0 to w.length - 2) } yield disort(i, i + 2, "" + w(i + 1) + w(i))
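The preview cuts off inside distortions. For context, Norvig-style spell checkers generate four kinds of single-edit distortions; a hedged, standalone sketch of the full edit set built on the same disort idea (illustrative names, not necessarily the gist's continuation):

// Illustrative sketch only: all single-edit distortions of a word.
def edits1(w: String, alphabet: Seq[Char]): Set[String] = {
  def disort(t: Int, d: Int, insert: String) = w.take(t) + insert + w.drop(d)
  val transposes = for (i <- 0 to w.length - 2) yield disort(i, i + 2, "" + w(i + 1) + w(i))
  val deletes    = for (i <- 0 until w.length) yield disort(i, i + 1, "")
  val replaces   = for (i <- 0 until w.length; c <- alphabet) yield disort(i, i + 1, c.toString)
  val inserts    = for (i <- 0 to w.length; c <- alphabet) yield disort(i, i, c.toString)
  (transposes ++ deletes ++ replaces ++ inserts).toSet
}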
@pkukielka
pkukielka / bloom.rs
Created December 9, 2013 20:32
Bloom Filter implementation in Rust
// Note: pre-1.0 Rust; extern mod, uint, float, and extra::bitv no longer exist in modern Rust.
extern mod extra;
use std::num;
use extra::bitv::Bitv;

struct BloomFilter { m: uint, k: uint, buckets: Bitv }

impl BloomFilter {
    fn new(n: uint, p: float) -> BloomFilter {
        let m = (-(n as float) * num::ln(p) / 0.48) as uint + 1;
        let k = (0.7 * (m as float) / (n as float)) as uint;
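Both this Rust gist and the Scala BloomFilter further down size the filter with the standard optimal Bloom-filter parameters; the hard-coded 0.48 and 0.7 are rounded values of (ln 2)² ≈ 0.4805 and ln 2 ≈ 0.6931:

m = \frac{-n \ln p}{(\ln 2)^2}, \qquad k = \frac{m}{n} \ln 2

where n is the expected number of elements and p the target false-positive rate.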
class AnagramsCounter {
  private val map = scala.collection.mutable.HashMap.empty[String, Int]
  private def charBuffer() = Array.fill[Int](256)(0)
  private def getValue(key: String) = map.getOrElse(key, 0)
  private def insert(key: String) = map += (key -> (getValue(key) + 1))
  def parse(input: String) {
    var letters = charBuffer()
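The preview stops just inside parse. A hedged guess at the idea, not the gist's actual continuation: count each character of a word into the 256-slot buffer, turn that histogram into a canonical key so every anagram shares it, and bump that key's count via insert. A standalone illustration:

// Illustrative sketch only: canonical key from a character histogram,
// assuming effectively 8-bit input as the 256-slot buffer suggests.
def anagramKey(word: String): String = {
  val letters = Array.fill[Int](256)(0)
  word.foreach(c => letters(c & 0xff) += 1)
  letters.mkString(",")
}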
@pkukielka
pkukielka / BloomFilter.scala
Last active December 30, 2015 02:19
Simple scala Bloom filter. Uses MurmurHash3 to compute hashes.
import scala.collection.mutable.BitSet
import scala.util.hashing.{MurmurHash3 => MH3}
import scala.math.{abs, log}

class BloomFilter(n: Int, p: Double) {
  val m = (-n * log(p) / 0.48).toInt + 1
  val k = (0.7 * m / n).toInt
  val buckets = new BitSet(m)
  def add(elem: String) = hash(elem).forall(buckets += _)
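The preview ends at add, which relies on a hash method that is not shown. Given the MurmurHash3 import, a hedged sketch of how the class could continue, deriving k indices by double hashing (an assumption, not the gist's code):

  // Sketch only: derive k bucket indices from two MurmurHash3 values
  // (standard double-hashing trick), then test membership the same way add sets bits.
  private def hash(elem: String): Seq[Int] = {
    val h1 = MH3.stringHash(elem, 0)
    val h2 = MH3.stringHash(elem, h1)
    (0 until k).map(i => abs(h1 + i * h2) % m)
  }

  def contains(elem: String): Boolean = hash(elem).forall(buckets contains _)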
import scala.actors.Futures._

object ScalaMul {
  val cores = Runtime.getRuntime().availableProcessors()
  val sets = (1 to cores).map(_ to 200000 by cores)
  val jobs = sets.map(set => future(set.foldLeft(BigInt(1))(_ * _)))
  val mult = awaitAll(3600000, jobs: _*).foldLeft(BigInt(1))(_ * _.get.asInstanceOf[BigInt])
  def main(args: Array[String]) = println(mult)
}
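scala.actors.Futures has long been deprecated and is no longer shipped with current Scala distributions; for reference, a hedged sketch of the same factorial-of-200000 computation on scala.concurrent (not part of the gist):

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

// Sketch only: the same interleaved-range factorial on scala.concurrent.Future.
object ScalaMulModern {
  def main(args: Array[String]): Unit = {
    val cores = Runtime.getRuntime.availableProcessors()
    val jobs  = (1 to cores).map(start => Future((start to 200000 by cores).foldLeft(BigInt(1))(_ * _)))
    println(Await.result(Future.sequence(jobs), 1.hour).product)
  }
}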
package com.pkukielka

import scala.actors.Futures._

object ScalaSum {
  // Plain while-loop sum; add before stepping so r.start is counted and r.end is not overshot.
  def sum(r: Range): Long = { var i = r.start; var acc = 0L; while (i <= r.end) { acc += i; i += r.step }; acc }

  val cores = Runtime.getRuntime().availableProcessors()
  val jobs = (1 to cores).map(_ to 1234567890 by cores).map(range => future(sum(range)))

  def main(args: Array[String]) {
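The preview is cut off inside main. By analogy with ScalaMul above it presumably awaits the futures and adds up the partial sums; a hedged sketch of such a body (an assumption, not the gist's actual code):

    // Sketch only, mirroring ScalaMul: wait for all partial sums and combine them.
    // The result can be cross-checked against the closed form n * (n + 1) / 2.
    val total = awaitAll(3600000, jobs: _*).foldLeft(0L)(_ + _.get.asInstanceOf[Long])
    println(total)
  }
}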