Skip to content

Instantly share code, notes, and snippets.

View 0asa's full-sized avatar
🐽

Vincent Botta 0asa

🐽
View GitHub Profile
@0asa
0asa / pushbullet.py
Last active August 29, 2015 14:02
Pushbullet notification
import requests

# Pushbullet API credentials — replace the placeholders with real values.
API_KEY = 'API_KEY'
IDEN = 'DEVICE_ID'

# get the list of available devices
# (Pushbullet uses HTTP basic auth with the API key as the username)
resp = requests.get('https://api.pushbullet.com/v2/devices', auth=(API_KEY, ''))
# Fixed: `print resp.json()` was Python 2 statement syntax (SyntaxError on Python 3).
print(resp.json())
data = {
@0asa
0asa / colorize.py
Last active August 29, 2015 14:07
(brutally) Add color to a given text according to 'sentiment analysis' with Pattern (http://www.clips.ua.ac.be/pattern).
from pattern.en import sentiment
import colors
items = sentiment.viewitems()
# some text
text = ("Then, the module's variables, functions, and classes will be available to the caller through "
"the module’s namespace, a central concept in programming that is particularly helpful and "
"powerful in Python. Thanks to the way imports and modules are handled in Python, it is "
"relatively easy to structure a Python project. Easy, here, means that you do not have many "
# Clone every repository of the GitHub "twitter" org (first 200) over SSH,
# by piping the org's repo list (JSON) from the GitHub API into a Ruby one-liner.
curl -s https://api.github.com/orgs/twitter/repos?per_page=200 | ruby -rubygems -e 'require "json"; JSON.load(STDIN.read).each { |repo| %x[git clone #{repo["ssh_url"]} ]}'
from pyspark import SparkContext
import numpy as np
from sklearn.cross_validation import train_test_split, Bootstrap
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
def run(sc):
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@0asa
0asa / gitall.py
Created January 27, 2015 11:08
(naively) `git pull` all repositories found in the current directory.
import os, glob

# (naively) `git pull` all repositories found in the current directory.
# Remember the starting directory so each iteration's relative chdir works:
# the original never chdir'd back, so every folder after the first failed.
base = os.getcwd()
for folder in glob.glob('*'):
    # skip this script itself and anything that is not a directory
    if folder == 'gitall.py' or not os.path.isdir(folder):
        continue
    print('')
    print("===="*5 + " " + folder + " " + "===="*5)
    os.chdir(folder)
    os.system('git pull') # or whatever commands...
    os.chdir(base)  # return to the base directory for the next iteration
@0asa
0asa / sklearn-pyspark.py
Created January 27, 2015 11:12
Run a Scikit-Learn algorithm on top of Spark with PySpark
from pyspark import SparkConf, SparkContext
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
import pandas as pd
import numpy as np
# Local Spark configuration: run on all local cores ("local[*]"),
# app name "My app", 1 GB of executor memory.
conf = (SparkConf()
.setMaster("local[*]")
.setAppName("My app")
.set("spark.executor.memory", "1g"))
import glob
import pandas as pd

# Concatenate 'Sheet1' of every .xlsx file in the current directory
# into a single DataFrame (row indices of the inputs are preserved,
# matching the original append-in-a-loop behavior).
files = glob.glob('*.xlsx')
frames = [pd.read_excel(f, 'Sheet1') for f in files]
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
# With no input files the result is an empty DataFrame, as before.
df = pd.concat(frames) if frames else pd.DataFrame()
Day Pulse Ox [^manual] Charge 2 [^automatically detected]
1 7h41 7h21
2 7h30 7h20
3 7h40 7h37
4 8h19 8h33
5 7h34 7h58 (+ 1h54 [^a nap])
6 8h33 8h23
7 7h45 7h16
Day Pulse Ox Charge 2
1 7.475 9.689
2 7.048 8.805
3 5.692 6.689
4 7.910 9.583
5 6.404 9.540
6 7.514 9.350
7 5.688 7.516