https://speakerdeck.com/jonemo/factory-automation-with-python-pycon-2017
(includes extra and backup slides that will probably be skipped)
https://speakerdeck.com/jonemo/factory-automation-with-python-pycon-2017
(includes extra and backup slides that will probably be skipped)
223 tune 62,53,0,4 | |
224 tune 62,34,0,192 | |
225 tune 62,53,0,192 | |
226 tune 62,58,0,192 | |
227 tune 62,62,0,192 | |
228 tune 62,67,0,192 | |
229 tune 62,53,0,192 | |
230 tune 62,65,0,192 | |
231 tune 62,53,0,8 | |
232 tune 58,53,0,176 |
1 tune 0,0,0,1440 | |
2 tune 55,0,0,48 | |
3 tune 0,0,0,72 | |
4 tune 53,0,0,40 | |
5 tune 0,0,0,80 | |
6 tune 50,0,0,64 | |
7 tune 0,0,0,16 | |
8 tune 48,0,0,20 | |
9 tune 0,0,0,20 | |
10 tune 46,0,0,104 |
1 tune 72,0,0,0 | |
2 tune 67,0,0,0 | |
3 tune 76,0,0,1439 | |
4 tune 0,0,0,0 | |
5 tune 60,0,0,0 | |
6 tune 55,0,0,0 | |
7 tune 52,0,0,480 | |
8 tune 0,0,0,0 | |
9 tune 0,0,0,0 | |
10 tune 0,0,0,0 |
0 tune 0,52,0,180 | |
10 tune 0,0,0,60 | |
20 tune 0,52,0,60 | |
30 tune 0,0,0,60 | |
40 tune 0,52,0,60 | |
50 tune 0,0,0,60 | |
60 tune 0,52,0,180 | |
70 tune 0,0,0,60 | |
80 tune 0,52,0,180 | |
90 tune 0,0,0,300 |
import sys
import requests

# Read package names (one per line) from stdin and report, in CSV form,
# whether each name is registered on PyPI.
# NOTE(review): this chunk ends right after the existence check — the code
# that fills in author/maintainer and prints a per-package row is not
# visible here; `sstagg`/`author`/`maintainer` are only initialized.
print("Package Name,exists?,HTTP code,Author,Maintainer,registered by op?")
for package_name in sys.stdin:
    package_name = package_name.strip()
    # PyPI JSON metadata endpoint; a 404 means the name is unregistered.
    resp = requests.get(f"https://pypi.org/pypi/{package_name}/json")
    sstagg, author, maintainer = False, '', ''
    exists = resp.status_code == 200
__future__ | |
__main__ | |
_dummy_thread | |
_thread | |
abc | |
aifc | |
argparse | |
array | |
ast | |
asynchat |
# Kill whatever process is currently holding the given TCP/UDP port open.
# Usage: killport 8080
function killport {
  # lsof -t prints bare PIDs (one per line); -i :"$1" selects processes
  # using that port. Quote "$1" so an empty/whitespace argument cannot
  # be word-split into a different lsof invocation.
  lsof -ti :"$1" | xargs kill
}
# clean up docker volumes and images
# https://stackoverflow.com/questions/31909979/
function dclean {
  # Remove volumes no longer referenced by any container.
  docker volume rm $(docker volume ls -qf dangling=true)
  # Single-quote the awk program so $3 (the IMAGE ID column) is expanded
  # by awk, not by the shell — with double quotes the shell expanded $3
  # to empty and awk printed the entire line, breaking `docker rmi`.
  docker rmi $(docker images | grep "^<none>" | awk '{print $3}')
}
#!/usr/bin/env python3 | |
import re | |
def compress_iter(instr):
    """Yield run-length-encoded chunks of *instr*, e.g. "A3" for "AAA"."""
    # Each regex match is a maximal run of a single repeated character:
    # the captured char followed by zero or more backreferences to it.
    for run in re.finditer(r'(.)\1{0,}', instr):
        text = run.group()
        yield f"{text[0]}{len(text)}"