Skip to content

Instantly share code, notes, and snippets.

View boyank's full-sized avatar

Boyan Kolev boyank

  • Sofia, Bulgaria
View GitHub Profile
@boyank
boyank / nasa_jad.py
Last active March 19, 2021 05:33
Example of reading NASA JUNO JADE CALIBRATED SCIENCE DATA
"""Example of reading NASA JUNO JADE CALIBRATED SCIENCE DATA
https://pds-ppi.igpp.ucla.edu/search/view/?f=yes&id=pds://PPI/JNO-J_SW-JAD-3-CALIBRATED-V1.0/DATA/2018/2018091/ELECTRONS/JAD_L30_LRS_ELC_ANY_CNT_2018091_V03&o=1
https://stackoverflow.com/a/66687113/4046632
"""
import struct
from functools import reduce
from operator import mul
from collections import namedtuple
import requests
from bs4 import BeautifulSoup
import csv
# NOTE(review): despite the gist title ("NASA JUNO JADE CALIBRATED SCIENCE
# DATA"), the visible snippet queries the NZ Births, Deaths & Marriages
# historical-records search — presumably the gist preview is truncated;
# verify against the full gist before relying on this description.
# Spoof a desktop-browser User-Agent; some sites block the default requests UA.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'}
url = 'http://bdmhistoricalrecords.dia.govt.nz/Search/Search?Path=querySubmit.m%3fReportName%3dDeathSearch%26recordsPP%3d100000'
# Form fields for the death-record search: surname, first name, earliest date,
# plus the tab-state fields the server expects from the search form.
search_data = {'dsur': 'Jones', 'dfirst': 'Robert', 'ddate_lower': '01/01/1901', 'current_tab': 'tab1', 'switch_tab': 'Submit'}
# Fix: requests has NO default timeout — without one this call can block
# forever on an unresponsive server. 30 s is a generous upper bound.
resp = requests.post(url, data=search_data, headers=headers, timeout=30)
soup = BeautifulSoup(resp.text, 'lxml')
# Results table of the search page; soup.find returns None if the page layout
# changed or no such table is present — downstream (unseen) code should check.
table = soup.find('table', {'class': 'inner_table_left'})
@boyank
boyank / namesgenerator.py
Last active October 23, 2017 11:39
Docker/Moby project names generator, ported to Python from Go
# -*- coding: utf-8 -*-
"""Docker/Moby project names generator, ported to Python from Go
original code available at https://github.com/moby/moby/blob/master/pkg/namesgenerator/names-generator.go
Lists of adjectives and surnames as of 18 October 2017.
Licensed under the Apache License, Version 2.0 as the original code. See https://github.com/moby/moby/blob/master/LICENSE
"""
from random import choice, randint
@boyank
boyank / example_query.sql
Created October 3, 2017 11:12
example sql query
-- DUAL is Oracle's built-in one-row, one-column dummy table; selecting from
-- it is the canonical connectivity/smoke-test query (returns a single row).
SELECT * FROM dual;
@boyank
boyank / infostock.py
Last active September 7, 2016 12:05
Python script to download BSE Sofia trades data from http://www.infostock.bg
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib2
def scrape_data(url, to_file):
soup = BeautifulSoup(urllib2.urlopen(url), 'lxml')
for tr in soup.find('table', class_='homeTable noborders').find_all('tr'):