### Tested with:
- Spark 2.0.0 pre-built for Hadoop 2.7
- Mac OS X 10.11
- Python 3.5.2
Use S3 within PySpark with minimal hassle.
class ApacheSpark < Formula | |
desc "Engine for large-scale data processing" | |
homepage "https://spark.apache.org/" | |
url "https://archive.apache.org/dist/spark/spark-2.2.0/spark-2.2.0-bin-hadoop2.7.tgz" | |
version "2.2.0" | |
sha256 "97fd2cc58e08975d9c4e4ffa8d7f8012c0ac2792bcd9945ce2a561cf937aebcc" | |
head "https://github.com/apache/spark.git" | |
bottle :unneeded |
<?xml version="1.0" encoding="UTF-8"?> | |
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" | |
xmlns:moz="http://www.mozilla.org/2006/browser/search/"> | |
<ShortName>Google US</ShortName> | |
<Description>Google US</Description> | |
<InputEncoding>UTF-8</InputEncoding> | |
<moz:SearchForm>https://www.google.com/ncr</moz:SearchForm> | |
<Url type="text/html" method="get" template="https://www.google.com/search"> | |
<Param name="gl" value="en"/> | |
<Param name="hl" value="en"/> |
// Bookmarklet: toggle visibility of the ".card-short-id" element.
// Revealing it also applies emphasis styling (bold black text with a
// small right margin); hiding it simply adds the "hide" class back.
javascript:void (function () {
  const badge = $('.card-short-id');
  if (badge.hasClass('hide')) {
    badge.removeClass('hide');
    badge.css({
      'margin-right': '5px',
      color: 'black',
      'font-weight': 'bold',
    });
  } else {
    badge.addClass('hide');
  }
}());
#!/usr/bin/python | |
import hmac, struct, time, base64, hashlib # for totp generation | |
import re, sys, subprocess # for general stuff | |
from getopt import getopt, GetoptError # pretending to be easy-to-use | |
# | |
# gtb - Google(auth) + Tunnelblick | |
# |
<!-- place this in an %angular paragraph --> | |
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.5/leaflet.css" /> | |
<div id="map" style="height: 800px; width: 100%"></div> | |
<script type="text/javascript"> | |
function initMap() { | |
var map = L.map('map').setView([30.00, -30.00], 3); | |
L.tileLayer('http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', { |
import org.apache.commons.io.IOUtils | |
import java.net.URL | |
import java.nio.charset.Charset | |
// Zeppelin creates and injects sc (SparkContext) and sqlContext (HiveContext or SqlContext) | |
// So you don't need create them manually | |
// load map data | |
val myMapText = sc.parallelize( | |
IOUtils.toString( |
import pytest


def test_spark_sanity_check(sc, tmpdir):
    """Smoke-test the Spark fixture: a one-line CSV yields exactly one record.

    Writes a single-line CSV into a per-test temporary directory, loads it
    via ``sc.textFile``, and asserts that one line comes back.
    """
    sample_csv = tmpdir.mkdir(__name__).join('one_liner.csv')
    sample_csv.write("foo,bar,baz")
    assert sc.textFile(str(sample_csv)).count() == 1
var https = require('https'); | |
var util = require('util'); | |
exports.handler = function(event, context) { | |
console.log(JSON.stringify(event, null, 2)); | |
console.log('From SNS:', event.Records[0].Sns.Message); | |
var postData = { | |
"channel": "#aws-sns", | |
"username": "AWS SNS via Lamda :: DevQa Cloud", |