I hereby claim:
- I am yuiki on github.
- I am yuiki (https://keybase.io/yuiki) on keybase.
- I have a public key ASBcalVwx7aMThT5Pk3UhLEd1B9_23pe5DrLPDDU0mB_ago
To claim this, I am signing this object:
package main | |
import ( | |
"fmt" | |
"sync" | |
) | |
type Fetcher interface { | |
// Fetch returns the body of URL and | |
// a slice of URLs found on that page. |
import requests
from bs4 import BeautifulSoup

# Interactive English-Japanese dictionary: read one word per line from
# stdin and print the definition scraped from the weblio.jp entry page.
while True:
    # input() raises EOFError on Ctrl-D / end of piped input; exit the
    # loop cleanly instead of dumping a traceback.
    try:
        word = input()
    except EOFError:
        break
    url = "http://ejje.weblio.jp/content/" + word
    request = requests.get(url)
    soup = BeautifulSoup(request.text, "html.parser")
    # The definition lives in the element with class "content-explanation".
    # BeautifulSoup.find returns None when the word has no entry, so guard
    # before dereferencing .text (previously an AttributeError).
    element = soup.find(class_="content-explanation")
    if element is None:
        print("No entry found for: " + word)
        continue
    print(element.text)
#!/bin/bash
# Daily backup rotation against the remote host "raspi": one snapshot
# directory per day under BASE_DIR, at most BACKUP_UPPER_LIMIT kept.
BACKUP_UPPER_LIMIT=5
BASE_DIR=/mnt/backup/backup
# Today's snapshot directory name, e.g. 20240131.
NEW_DIR=$(date +'%Y%m%d')
# Name of the most recently modified snapshot directory on the remote host.
LATEST_DIR=$(ssh raspi ls -t "$BASE_DIR" | head -1)
if [ "$LATEST_DIR" = "$NEW_DIR" ]; then
  # A snapshot for today already exists.
  # The original line read "echo 1#exit 1" — two statements fused together;
  # "#" mid-word is not a comment, so it actually echoed the literal text
  # "1#exit 1". Split back into the echo and the commented-out exit.
  echo 1
  # exit 1  # NOTE(review): aborting here was likely intended — confirm.
fi
# based on https://github.com/flutter/flutter/blob/master/analysis_options.yaml
analyzer:
  strong-mode:
    implicit-dynamic: false
  errors:
    missing_required_param: warning
    missing_return: warning
    todo: ignore
    sdk_version_async_exported_from_core: ignore
set clipboard&
set smartindent
set shiftwidth=2
set tabstop=2
set expandtab
syntax on
I hereby claim:
To claim this, I am signing this object:
import pychromecast
from pychromecast.controllers.youtube import YouTubeController

# Friendly name of the target Chromecast and the YouTube video to cast.
# Fill these in before running.
CAST_NAME = ""
VIDEO_ID = ""

# Discover all Chromecasts on the local network and pick the one whose
# friendly name matches CAST_NAME. Pass a default to next() so a missing
# device produces a clear error instead of an opaque StopIteration.
chromecasts = pychromecast.get_chromecasts()
cast = next(
    (cc for cc in chromecasts if cc.device.friendly_name == CAST_NAME),
    None,
)
if cast is None:
    raise SystemExit("Chromecast not found: {!r}".format(CAST_NAME))
# Block until the device is ready to receive commands.
cast.wait()
import csv
import sys

# Require exactly one command-line argument: the CSV file of grades.
argv = sys.argv
if len(argv) != 2:
    print("Usage: python3 {} gpa.csv".format(argv[0]))
    # quit() is a convenience for interactive sessions and exits with
    # status 0; a usage error should report failure to the shell.
    sys.exit(1)

# Running total of credit units seen so far.
total_amount = 0
import csv
import sys

# Require exactly one command-line argument: the CSV file of grades.
argv = sys.argv
if len(argv) != 2:
    print("Usage: python3 {} gpa.csv".format(argv[0]))
    # quit() is a convenience for interactive sessions and exits with
    # status 0; a usage error should report failure to the shell.
    sys.exit(1)

# Running total of credit units seen so far.
total_amount = 0
from bs4 import BeautifulSoup | |
import requests | |
import unicodedata | |
url = "https://tsukuba-daigaku.com/?bukken=jsearch&shu=2&kalc=0&kahc=0&mad%5B%5D=110&mad%5B%5D=120&tik=0&cor=0" | |
districts = {} | |
count = 0 | |
for i in range(0, 40): | |
request = requests.get(url) | |
soup = BeautifulSoup(request.text, "html.parser") |