nanzono / get_http_header.py
Last active December 15, 2015 08:39
Trace 301 and 302 redirects, and print the HTTP status code and Location header.
# coding:utf-8
import httplib
import urlparse
def get_http_header(survey_url):
    p = urlparse.urlsplit(survey_url)
    hostname = p.netloc
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) " \
               + "AppleWebKit/537.22 (KHTML, like Gecko) " \
nanzono / WebElement.py
Created June 6, 2013 03:06
A list of the properties of selenium.webdriver.remote.webelement.WebElement, so I don't have to look them up every time.
# selenium.webdriver.remote.webelement.WebElement
web_element_property = [
    "__class__",
    "__delattr__",
    "__dict__",
    "__doc__",
    "__eq__",
    "__format__",
    "__getattribute__",
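Rather than maintaining the list by hand, it can be regenerated from the class itself. A small sketch, assuming the selenium package is installed (printing the names is my addition):

# coding:utf-8
# Hedged sketch: rebuild the property list with dir() instead of hard-coding it.
from selenium.webdriver.remote.webelement import WebElement

web_element_property = dir(WebElement)
for name in web_element_property:
    print name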
nanzono / get_google_drive_pdfs.py
Created June 21, 2013 02:09
Get a list of PDFs using google-api-python-client, and save the oauth2client.client.Credentials.
# coding:utf-8
import os
import httplib2
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import Credentials
def get_credentials_oath2(file_path):
    CLIENT_ID = ''  # your client id
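The preview stops at the client ID. A hedged sketch of the two pieces the description mentions, listing PDFs with a Drive v2 mimeType query and writing the credentials back to disk (the query string, function names, and file handling are assumptions, not the original code):

# coding:utf-8
# Hedged sketch: list PDFs via the Drive v2 API and persist the credentials.
import httplib2
from apiclient.discovery import build

def get_pdf_list(credentials):
    http = credentials.authorize(httplib2.Http())
    service = build('drive', 'v2', http=http)
    response = service.files().list(q="mimeType='application/pdf'").execute()
    return response.get('items', [])

def save_credentials(credentials, file_path):
    with open(file_path, 'w') as f:
        f.write(credentials.to_json())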
nanzono / get_days.py
Created July 23, 2013 03:35
Assuming a lifespan of 80 years.
# coding:utf-8
import datetime
def print_days(birth_year, birth_month, birth_day, target_year):
    birth_day = datetime.date(birth_year, birth_month, birth_day)
    today = datetime.date.today()
    last_day = datetime.date(birth_day.year + target_year, birth_day.month, birth_day.day)
    print last_day
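The preview ends after printing the date of the 80th birthday. A hedged guess at the day-counting the description implies, using plain datetime.date subtraction (the two print lines are mine, not necessarily the original):

# coding:utf-8
# Hedged sketch: days lived so far and days left until the target age.
import datetime

def remaining_days(birth_year, birth_month, birth_day, target_year=80):
    birth = datetime.date(birth_year, birth_month, birth_day)
    today = datetime.date.today()
    last_day = datetime.date(birth.year + target_year, birth.month, birth.day)
    print (today - birth).days      # days already lived
    print (last_day - today).days   # days left, assuming an 80-year lifespan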
nanzono / adafruit_evernote_strip.js
Created August 15, 2013 09:44
Strip Adafruit product detail pages down to just the necessary information, so they can be clipped into Evernote.
var tab_0 = document.getElementById("tab_0");
var tab_1 = document.getElementById("tab_1");
var tab_2 = document.getElementById("tab_2");
var tab_3 = document.getElementById("tab_3");
tab_0.style.display = "";
tab_1.style.display = "";
tab_2.style.display = "";
tab_3.style.display = "";
nanzono / scraper.py
Last active December 27, 2015 03:48
how to scrape
# coding:utf-8
from BeautifulSoup import BeautifulSoup
import urllib2
import json
def crawl(tmp_url):
    page = urllib2.urlopen(tmp_url)
    soup = BeautifulSoup(page.read())
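The crawl preview stops right after parsing. A hedged continuation showing one way the parsed page could be reduced to JSON with the old BeautifulSoup 3 API (the chosen fields and keys are illustrative assumptions):

# coding:utf-8
# Hedged sketch: pull the title and anchor hrefs out of the page and dump
# them as JSON. Field names are illustrative, not from the original gist.
from BeautifulSoup import BeautifulSoup
import urllib2
import json

def crawl(tmp_url):
    page = urllib2.urlopen(tmp_url)
    soup = BeautifulSoup(page.read())
    title = soup.title.string if soup.title else ""
    links = [a.get("href") for a in soup.findAll("a") if a.get("href")]
    return json.dumps({"url": tmp_url, "title": title, "links": links},
                      ensure_ascii=False)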
nanzono / parse_html.js
Last active December 27, 2015 05:59
Extract HTML elements (title and meta tags).
var titles = document.getElementsByTagName("title");
var title = (titles.length > 0) ? titles[0].innerHTML: "";
var metas = document.getElementsByTagName("meta");
var description = "";
var keywords = "";
for (var i = 0; i < metas.length; i++) {
    var meta_name = metas[i].getAttribute("name");
    if (meta_name == "description") {
        description = metas[i].getAttribute("content");
    } else if (meta_name == "keywords") {
nanzono / duplicated_finder.py
Created June 27, 2014 08:57
check duplicate rows.
# coding:utf-8
import os
import hashlib
import sqlite3
target_path = './20140620/'
db_path = './logs.db'
file_output = './output.txt'
# coding:utf-8
import os
target_path = './target/'
def main():
    for dpath, dnames, fnames in os.walk(target_path):
        for fname in fnames:
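The previews show the setup (hashlib, sqlite3, the paths) and the directory walk, but not the duplicate check itself. A hedged sketch of how hashing each row and flagging repeats might fit together, using an in-memory dict where the real gist presumably uses the sqlite3 database:

# coding:utf-8
# Hedged sketch: hash every line of every file under target_path and report
# rows whose MD5 digest has been seen before.
import os
import hashlib

target_path = './target/'

def find_duplicate_rows():
    seen = {}  # md5 hexdigest -> (path, line number) of the first occurrence
    for dpath, dnames, fnames in os.walk(target_path):
        for fname in fnames:
            fpath = os.path.join(dpath, fname)
            with open(fpath) as f:
                for lineno, row in enumerate(f, 1):
                    digest = hashlib.md5(row).hexdigest()
                    if digest in seen:
                        print fpath, lineno, 'duplicates', seen[digest]
                    else:
                        seen[digest] = (fpath, lineno)

if __name__ == '__main__':
    find_duplicate_rows()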
nanzono / reset_yun.ino
Created August 31, 2014 16:40
For calling and controlling OpenWRT from the Arduino side.
#include <Process.h>
void c_print(Process p) {
    while (p.available() > 0) {
        char c = p.read();
        Serial.print(c);
    }
    Serial.flush();
}