Drag and drop desired messages to directories (e.g., archived/).
Convert directory to mbox file
./eml2mbox.py archived archived.mbox
Follow directions here to upload via Gmail API
https://github.com/google/import-mailbox-to-gmail
#! /usr/bin/env fish | |
set target_pids (pgrep caffeinate) | |
set num_pids (count $target_pids) | |
if test $num_pids -eq 0 | |
caffeinate -d -t 43200 | |
else | |
for target_pid in $target_pids | |
kill $target_pid |
from opentrons import robot, containers, instruments | |
robot.connect(robot.get_serial_ports_list()[0]) | |
robot.home() | |
tiprack200 = containers.load('tiprack-200ul', 'C2') | |
trash = containers.load('point', 'D2') | |
p200 = instruments.Pipette(axis='a', | |
max_volume=200, | |
min_volume=20, |
import json | |
from copy import deepcopy | |
from itertools import product | |
from collections import OrderedDict | |
all_wells = ['{}{}'.format(r, c) for c in range(1, 13) for r in 'ABCDEFGH'] | |
with open('_containers_create.json.bak','r') as ip: | |
obj = json.load(ip) |
import pandas as pd | |
from airtable import airtable | |
metadata = pd.read_table(metadata_path, sep=',', header=0, index_col=None) | |
at = airtable.Airtable(base_id, api_key) | |
ibd_records = at.get('sample', filter_by_formula='AND(project = "cho-ibd", phenotype = "")')['records'] | |
for record in ibd_records: | |
if (metadata.local_sample == record['fields']['sample_id']).sum() != 1: |
Drag and drop desired messages to directories (e.g., archived/).
Convert directory to mbox file
./eml2mbox.py archived archived.mbox
Follow directions here to upload via Gmail API
https://github.com/google/import-mailbox-to-gmail
package com.cloudera.science.throwaway; | |
public class ThrowAway { | |
private String datum = "Howdy!"; | |
public ThrowAway() { } | |
public String response() { | |
return "Goodbye!"; | |
} |
import org.bdgenomics.formats.avro.AlignmentRecord | |
import org.bdgenomics.adam.rdd.ADAMContext._ | |
val bamFile = "/Users/laserson/repos/adam/adam-core/src/test/resources/artificial.sam" | |
val reads = sc.adamLoad[AlignmentRecord, Nothing](bamFile) | |
reads.first() | |
/* | |
java.lang.IncompatibleClassChangeError: Implementing class | |
at java.lang.ClassLoader.defineClass1(Native Method) |
--------------------------------------------------------------------------- | |
TypeError Traceback (most recent call last) | |
<ipython-input-33-9be617700fcd> in <module>() | |
6 t = ax.plot(times, events, lw=1, c=random_color()) | |
7 ax.set_yscale('log') | |
----> 8 IPython.core.pylabtools.print_figure(fig) | |
/usr/lib/python2.6/site-packages/IPython/core/pylabtools.pyc in print_figure(fig, fmt) | |
108 fmt = 'png' | |
109 fig.canvas.print_figure(bytes_io, format=fmt, bbox_inches='tight', |
Build Spark on local machine (only if using PySpark; otherwise, remote machine works) (http://spark.apache.org/docs/latest/building-with-maven.html)
export MAVEN_OPTS="-Xmx2g -XX:MaxPermSize=512M -XX:ReservedCodeCacheSize=512m"
mvn -Pyarn -Phadoop-2.4 -Dhadoop.version=2.4.0 -DskipTests clean package
Copy the assembly/target/scala-2.10/...jar to the corresponding directory on the cluster node, and also into a location in HDFS.