2016-06-20: 1105 due / 1010 paid (Upwork).
2016-06-27: 1102 due / 1202 paid (Upwork).
2016-07-04: 1157 due / 1078 paid (Upwork).
2016-07-11: 1134 due / 986 paid (Upwork).
Sum of differences: 4498 - 4276 = 222
Hiring effort for data scientist position (hh.ru): 160
Total: 222 + 160 = $382
{
  "href" : "/v1/databases/11111111-1111-1111-1111-111111111111/offers?group=20160330_TBL_OF_BH_Apoio_Email_01&limit=10&offset=0",
  "totalCount" : 8,
  "offset" : 0,
  "limit" : 10,
  "first" : {
    "href" : "/v1/databases/11111111-1111-1111-1111-111111111111/offers?group=20160330_TBL_OF_BH_Apoio_Email_01&limit=10&offset=0"
  },
  "last" : {
    "href" : "/v1/databases/11111111-1111-1111-1111-111111111111/offers?group=20160330_TBL_OF_BH_Apoio_Email_01&limit=10&offset=0"
  }
}
{
  "href" : "/v1/databases/11111111-1111-1111-1111-111111111111/users?property=documentNumber&value=11111111111&expand=properties&limit=10&offset=0",
  "totalCount" : null,
  "offset" : 0,
  "limit" : 10,
  "first" : {
    "href" : "/v1/databases/11111111-1111-1111-1111-111111111111/users?property=documentNumber&value=11111111111&expand=properties&limit=10&offset=0"
  },
  "last" : null,
  "next" : null
}
mysql> SHOW STATUS LIKE 'innodb%';
+--------------------------------+-------------+
| Variable_name                  | Value       |
+--------------------------------+-------------+
| Innodb_available_undo_logs     | 128         |
| Innodb_background_log_sync     | 1585231     |
| Innodb_buffer_pool_bytes_data  | 11860033536 |
| Innodb_buffer_pool_bytes_dirty | 0           |
| Innodb_buffer_pool_dump_status | not started |
| Innodb_buffer_pool_load_status | not started |
START TRANSACTION;

CREATE TEMPORARY TABLE IF NOT EXISTS STUDIO_TEMP_STAGE_USER_PROPS (
  uuid varchar(80) COLLATE utf8_unicode_ci NOT NULL,
  raw_content longtext COLLATE utf8_unicode_ci NOT NULL,
  security_level varchar(255) COLLATE utf8_unicode_ci NOT NULL,
  key_name varchar(255) COLLATE utf8_unicode_ci NOT NULL,
  stage_user_uuid varchar(80) NOT NULL,
  data_type varchar(255) COLLATE utf8_unicode_ci NOT NULL,
  string_value varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
  integer_value bigint(20) DEFAULT NULL,
  double_value double DEFAULT NULL
);
CREATE TABLE IF NOT EXISTS `STUDIO_MANAGERS` (
  `uuid` varchar(80) COLLATE utf8_unicode_ci NOT NULL,
  `USERNAME` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
  `PASSWORD` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
  `EMAIL` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
  `PHONE` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
  `FIRST_NAME` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
  `LAST_NAME` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
  `STATUS` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
  `DATE_CREATED` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);
<!doctype html><!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
snowindy / zeppelin-emr-bootstrap-action.sh
Last active May 27, 2016 01:47
This is a bootstrap action script for Amazon EMR that helps install Apache Zeppelin to work with Apache Spark. Works fine with [EMR release label: emr-4.0.0, Hadoop distribution: Amazon 2.6.0, Applications: Hive 1.0.0, Spark 1.4.1].
#!/bin/bash
# Zeppelin installation on a Spark EMR cluster
sudo yum update -y
sudo yum install -y git
# Installing maven: https://gist.github.com/sebsto/19b99f1fa1f32cae5d00
sudo wget http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo -O /etc/yum.repos.d/epel-apache-maven.repo
# Pin the repo to EL6: Amazon Linux reports a $releasever this repo URL cannot resolve
sudo sed -i 's/$releasever/6/g' /etc/yum.repos.d/epel-apache-maven.repo
sudo yum install -y apache-maven
snowindy / spark-create-rdd-from-s3-parallel.scala
Last active September 24, 2020 12:25
This code allows parallel loading of data from S3 to a Spark RDD. Supports multiple paths to load from. Based on http://tech.kinja.com/how-not-to-pull-from-s3-using-apache-spark-1704509219
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.s3._, model._
import scala.collection.JavaConverters._
import scala.io.Source

// S3 sources: comma-separated list of files and/or directory prefixes
val s3Paths = "s3://yourbucket/path/to/file1.txt,s3://yourbucket/path/to/directory"
val pageLength = 100 // S3 listing page size
val key = "YOURKEY"
val secret = "YOUR_SECRET"
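
The listing and loading logic is truncated above; what follows is a minimal sketch of how the pattern from the linked article can be completed, assuming a live SparkContext named sc. The listKeys helper and the bucket/prefix parsing are illustrative, not part of the original gist:

// Page through an S3 listing, collecting every key under the prefix
def listKeys(s3: AmazonS3, bucket: String, prefix: String): Seq[String] = {
  val req = new ListObjectsRequest()
    .withBucketName(bucket).withPrefix(prefix).withMaxKeys(pageLength)
  var listing = s3.listObjects(req)
  val keys = collection.mutable.Buffer[String]()
  keys ++= listing.getObjectSummaries.asScala.map(_.getKey)
  while (listing.isTruncated) {
    listing = s3.listNextBatchOfObjects(listing)
    keys ++= listing.getObjectSummaries.asScala.map(_.getKey)
  }
  keys.toSeq
}

// Split "s3://bucket/path" into (bucket, path-without-leading-slash)
val bucketsAndPrefixes = s3Paths.split(",").map { p =>
  val uri = new java.net.URI(p.trim)
  (uri.getHost, uri.getPath.stripPrefix("/"))
}

// Expand every path into concrete (bucket, key) pairs on the driver
val allKeys = {
  val s3 = new AmazonS3Client(new BasicAWSCredentials(key, secret))
  bucketsAndPrefixes.flatMap { case (b, prefix) =>
    listKeys(s3, b, prefix).map(k => (b, k))
  }
}

// Read the keys in parallel; the client is rebuilt per task because it is not serializable
val linesRdd = sc.parallelize(allKeys.toSeq).flatMap { case (b, k) =>
  val s3 = new AmazonS3Client(new BasicAWSCredentials(key, secret))
  Source.fromInputStream(s3.getObject(b, k).getObjectContent).getLines
}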
import java.util.Calendar;
import java.util.Date;
public static int getSecondsToDayHour(Date fromDate, int hour) {
    Calendar cal = Calendar.getInstance();
    cal.setTime(fromDate); // anchor the calendar to fromDate rather than "now"
    cal.set(Calendar.HOUR_OF_DAY, hour);
    cal.set(Calendar.MINUTE, 0);
    cal.set(Calendar.SECOND, 0);
    long seconds = (cal.getTime().getTime() - fromDate.getTime()) / 1000;
    if (seconds < 0) {
        // target hour already passed that day; roll over to the same hour next day
        cal.add(Calendar.DATE, 1);
        seconds = (cal.getTime().getTime() - fromDate.getTime()) / 1000;
    }
    return (int) seconds;
}