@TobCap
TobCap / FlattenDictionary.swift
Created October 11, 2017 08:04
flatten a dictionary, ignoring levels of nesting deeper than `keepLevel`
func flattenDictionary(dic: [String: Any], keepLevel: UInt = UInt.max) -> [String: Any] {
    var result: [String: Any] = [:]
    func flattenDic(_ dic: [String: Any], out: inout [String: Any], level: UInt, addedKey: String = "") {
        if level == 0 { return }
        for (key, val) in dic {
            let modKey = addedKey + key
            if let val = val as? [String: Any] {
                flattenDic(val, out: &out, level: level - 1, addedKey: modKey)
            } else {
                // leaf value: store it under the concatenated key path
                out[modKey] = val
            }
        }
    }
    flattenDic(dic, out: &result, level: keepLevel)
    return result
}
library("condMVNorm")
library("scatterplot3d")
# 2 variables
local({
old.par <- par(no.readonly = TRUE)
on.exit(par(old.par))
set.seed(1)
s <- matrix(c(1.0, -0.5, -0.5, 1.0), 2, 2)
---
title: "How to set a random seed in R's parallel code with
package `foreach` on Windows 10"
output: md_document
---
The purpose of this post is to clarify how to use a random seed for parallel
calculation with `foreach`, `doParallel`, and `doRNG` on _Windows 10_ when
evaluating variables that are not defined inside the right-hand side of `%dopar%`.
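As a minimal sketch (the seed value and worker count are illustrative), `registerDoRNG()` from the `doRNG` package makes a `%dopar%` loop reproducible:

library(foreach)
library(doParallel)
library(doRNG)

cl <- makeCluster(2)
registerDoParallel(cl)

registerDoRNG(seed = 123)
r1 <- foreach(i = 1:4, .combine = c) %dopar% rnorm(1)

registerDoRNG(seed = 123)
r2 <- foreach(i = 1:4, .combine = c) %dopar% rnorm(1)

identical(r1, r2)  # TRUE: re-registering the same seed reproduces the draws
stopCluster(cl)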
//: Playground - noun: a place where people can play
import UIKit
var str = "Hello, playground"

func transpose<T>(_ a: [[T]]) -> [[T]] {
    if a.isEmpty {
        return []
    }
    // assumes rectangular input: swap rows and columns
    return (0 ..< a[0].count).map { col in a.map { $0[col] } }
}
# http://qiita.com/HikaruR/items/c67b6c9714c51d70333c
# accessed on 2017-06-23
# pick the n-th element from each vector passed via `...`
elem_n <- function(n, ...) {
  dots <- list(...)
  unlist(lapply(dots, `[`, n))
}

# draw a triangle whose vertices y1, y2, y3 are c(x, y) pairs
polygon_y123 <- function(y1, y2, y3, ...) {
  polygon(x = elem_n(1, y1, y2, y3), y = elem_n(2, y1, y2, y3), ...)
}
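An illustrative call (the coordinates are made up, not from the original gist): draw one triangle from three c(x, y) vertices.

plot(0:1, 0:1, type = "n", xlab = "", ylab = "")
polygon_y123(c(0, 0), c(1, 0), c(0.5, 1), col = "grey80")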
@TobCap
TobCap / nested_tibble.r
Last active June 16, 2017 05:50
create nested data.frame (tibble) with rowwise()
library(dplyr)
library(tidyr)

df <- tibble(x = c(1, 1, 1), y = c(1, 2, 3))

# answer
a <- df %>%
  mutate(add = x + y, sub = x - y) %>%
  nest(add, sub)
@TobCap
TobCap / test.r
Last active December 17, 2016 03:00
do() vs. map() when manipulating grouped data
library(dplyr)
library(purrr)
library(tidyr)  # unnest()
library(broom)  # tidy()

m1 <- mtcars %>%
  group_by(cyl) %>%
  do(data_ = lm(mpg ~ hp + wt, data = .) %>% tidy) %>%
  unnest()

m2 <- mtcars %>%
  split(.$cyl) %>%
  map_df(~ lm(mpg ~ hp + wt, data = .) %>% tidy, .id = "cyl") %>%
  as_data_frame()
# grid search with {purrr} and {modelr}
# https://drsimonj.svbtle.com/grid-search-in-the-tidyverse
library("tidyverse")
library("purrr")
library("modelr")
library("rpart")
library("gridExtra")
# install.packages(c("xml2", "stringr", "dplyr"))
library(xml2)
library(stringr)
library(dplyr)
code <- 7267  # securities code to query
url_ <- paste0("http://resource.ufocatch.com/atom/edinet/query/", code)
x <- read_xml(url_)  # fetch the EDINET Atom feed from ufocatch
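A possible continuation, assuming the response is a standard Atom feed (so xml2 assigns its default namespace the prefix d1; the XPath expressions are assumptions, not from the original gist): pull out entry titles and links.

ns <- xml_ns(x)
titles <- xml_text(xml_find_all(x, ".//d1:entry/d1:title", ns))
links  <- xml_attr(xml_find_all(x, ".//d1:entry/d1:link", ns), "href")
head(titles)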

Memo for installing Spark on Windows 10, with SparkR example code

Download the Hadoop 2.7.1 binary files (unofficial build):

http://kplitzkahran.blogspot.jp/2015/08/hadoop-271-for-windows-10-binary-build.html

  • Expand the .tar.gz archive to "C:\dev\hadoop-2.7.1".
  • Set the environment variable HADOOP_HOME=C:\dev\hadoop-2.7.1.
  • Add C:\dev\hadoop-2.7.1\bin to your PATH environment variable.

Download Spark 2.0.0:

https://spark.apache.org/downloads.html

  • Select "2.0.0 (Jul 26 20116)", "Pre-built for Hadoop 2.7 and later", "Direct Download", and download spark-2.0.0-bin-hadoop2.7.tgz.