View download-genbank.sh
#!/bin/bash
set -euo pipefail
# Start from a clean copy before mirroring.
rm -rf genbank/
# NOTE: the rsync source below is an assumption; point it at the GenBank
# area you actually need on NCBI's rsync server.
rsync \
  --archive \
  --recursive \
  --progress \
  rsync://ftp.ncbi.nlm.nih.gov/genbank/ \
  genbank/
View scrape-greengenes.sh
#!/bin/bash
set -euo pipefail
# Fetch the Greengenes 13_5 downloads page.
function get_source() {
  curl --silent "http://greengenes.secondgenome.com/downloads/database/13_5"
}
# Pull the S3 download links out of the page's HTML attributes.
function get_urls() {
  get_source | grep 'https:' | grep s3 | cut -d '"' -f 2
}
# Print one URL per line so the output can be piped to a downloader.
get_urls
View rargparse.R
#!/usr/bin/env Rscript
library(R6)
library(testthat)
# A single command-line argument: its flag and help text.
Argument <- R6Class('Argument',
  public = list(
    flag = NA,
    help = NA
  )
)
View gaga2.go
package main
import (
	"bufio"
	"fmt"
	"os"
)
func main() {
	// Assumed placeholder body: echo stdin line by line so the file compiles.
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
View pcr.py
class DNA:
    def __init__(self, sequence: str) -> None:
        # Store the sequence case-insensitively.
        self.sequence = str(sequence).lower()

    @property
    def reverse(self) -> 'DNA':
        return DNA(self.sequence[::-1])

    @property
    def complement(self) -> 'DNA':
        # Assumed completion: base-pairing complement (a<->t, c<->g).
        return DNA(self.sequence.translate(str.maketrans('acgt', 'tgca')))
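A quick usage sketch, assuming the complement property filled in above:

dna = DNA('GATTACA')
print(dna.reverse.sequence)             # acattag
print(dna.reverse.complement.sequence)  # tgtaatc, i.e. the reverse complement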
View scrape.py
#!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
url = 'http://www.berkeleybowl.com/daily-hot-soup'
res = requests.get(url)
res.raise_for_status()  # bail out on HTTP errors before parsing
soup = BeautifulSoup(res.content, 'html5lib')
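The page's real markup isn't shown above, so any selector is a guess; a generic extraction step might look like:

for item in soup.find_all('li'):
    text = item.get_text(strip=True)
    if text:
        print(text)  # hypothetical: the actual tag/class for the menu is unknown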
View asdf.rb
class Render
  # Class-level [] so callers can write Render[engine: ...] like a lookup.
  def self.[](engine:)
    p engine
  end
end
Render[engine: 'doot']
View boat.rb
class Boat
  class << self
    # Both operators live on the singleton class, so they are called on
    # Boat itself; each simply returns its argument.
    def <=>(boat)
      boat
    end

    def [](boat)
      boat
    end
  end
end
View ncbi-fetch.rb
#!/usr/bin/env ruby
# Download all genomes from NCBI, 8 downloads at a time:
#   ./ncbi-fetch.rb | xargs -I {} -n 1 -P 8 wget {}
File.foreach('assembly_summary.txt') do |line|
  next if line.start_with?('#') # skip the header rows

  row = line.strip.split("\t")
  base_url = row[19] # column 20 is the assembly's FTP directory
  next if base_url.nil?

  dir = base_url.split('/').last
  # Assumed: print the genomic FASTA URL (standard <dir>_genomic.fna.gz naming).
  puts "#{base_url}/#{dir}_genomic.fna.gz"
end
View nose.Makefile
.PHONY: test ftest clean

test:
	@python -m nose --nocapture

ftest:
	@python -m nose --with-focus --nocapture

clean:
	@git clean -f  # caution: deletes every untracked file, not just build output