I hereby claim:
- I am xorlev on github.
- I am xorlev (https://keybase.io/xorlev) on keybase.
- I have a public key whose fingerprint is 106C 2C05 76E1 3B35 69B0 B2CA 1606 E6ED 31AD 179A
To claim this, I am signing this object:
# NOTE(review): this fragment is garbled by extraction — the trailing " | |"
# tokens are paste artifacts, indentation is lost, and neither the class nor
# the method is ever closed (the snippet is truncated). Not valid Ruby as-is;
# restore from the original source before using.
require 'active_record' | |
# ActiveRecord model: an Album belongs to one artist, has many tracks, and is
# joined to genres through a habtm association.
class Album < ActiveRecord::Base | |
belongs_to :artist | |
has_many :tracks | |
has_and_belongs_to_many :genres | |
attr_accessible :id, :artist_id, :genres | |
# NOTE(review): the body below looks like a one-off data-import loop, not a
# to_s implementation — presumably two separate snippets were merged here;
# verify against the original gist.
def to_s |
# Not the most efficient method... | |
# For each line of the data file: split it into an album id, an artist id,
# and a list of genre ids (split_and_filter is defined elsewhere), resolve
# the genre ids to Genre records, then create one Album row per line.
File.open('track1/albumData1.txt').each do |line| | |
album, artist, *genres = split_and_filter(line) | |
genres.map! { |g| Genre.find(g) } | |
Album.create! :id => album, :artist_id => artist, :genres => genres | |
end |
On the last machine in the chain:
nc -l 1234 | pigz -d | tar xvf -
On each intermediate machine, use a fifo to copy the data onward to the next machine while also decompressing it locally:
mkfifo myfifo
nc $NEXT_SERVER 1234 <myfifo &
nc -l 1234 | tee myfifo | pigz -d | tar xvf -
// NOTE(review): fragment is truncated — prepare() and the class body are cut
// off mid-method, and the trailing " | |" tokens are extraction artifacts,
// not code. Not compilable as-is; restore from the original source.
public class MetricsWeb extends BaseTaskHook { | |
private static final Logger log = LoggerFactory.getLogger(MetricsWeb.class); | |
// Static so one embedded web server / ZooKeeper client / service-discovery
// handle is shared per JVM, regardless of how many task instances run.
private static Server server = null; | |
private static CuratorFramework zk = null; | |
private static ServiceDiscovery<Void> dsc = null; | |
// Task hook entry point: starts the web console on port 9090, but only when
// the topology config's "environment" key equals "production".
@Override | |
public void prepare(Map conf, TopologyContext context) { | |
if ("production".equals(conf.get("environment"))) { | |
initWebConsole(9090); |
// ==UserScript==
// @name Kato.im Fluid Dock Badge
// @description Displays the unread count of messages, adapted from http://userscripts.org/scripts/review/177616
// @author Michael Rose
// ==/UserScript==

// Clear any stale badge left over from a previous session, then keep it
// fresh: two quick one-shot refreshes while the page finishes loading,
// followed by a steady 5-second poll.
// (updateDockBadge is defined elsewhere in the script; window.fluid is the
// Fluid.app host API — TODO confirm against the full userscript.)
window.fluid.dockBadge = '';
setTimeout(updateDockBadge, 1000);
setTimeout(updateDockBadge, 3000);
setInterval(updateDockBadge, 5000);
package com.fullcontact.hadoop.cassandra; | |
import com.google.common.collect.Lists; | |
import org.apache.cassandra.db.marshal.AbstractType; | |
import org.apache.cassandra.db.marshal.CompositeType; | |
import org.apache.cassandra.db.marshal.UTF8Type; | |
import org.apache.cassandra.thrift.Column; | |
import org.apache.cassandra.thrift.ColumnOrSuperColumn; | |
import org.apache.cassandra.thrift.Mutation; | |
import org.apache.cassandra.utils.ByteBufferUtil; |
package storm.examples; | |
import backtype.storm.task.OutputCollector; | |
import backtype.storm.task.TopologyContext; | |
import backtype.storm.topology.OutputFieldsDeclarer; | |
import backtype.storm.topology.base.BaseRichBolt; | |
import backtype.storm.tuple.Fields; | |
import backtype.storm.tuple.Tuple; | |
import backtype.storm.tuple.Values; | |
import java.util.Map; |
// To force synchronous .queue() behavior: switch this command's Hystrix
// isolation strategy from the default (THREAD) to SEMAPHORE, so the command
// executes on the calling thread and queue() completes inline rather than
// being dispatched to a thread pool.
ConfigurationManager.getConfigInstance()
        .setProperty("hystrix.command." + ReportUsageCommand.class.getSimpleName() + ".execution.isolation.strategy", "SEMAPHORE");
new ReportUsageCommand(my, ctor, args).queue(); // is now synchronous.
$ sudo /opt/java7/bin/jmap -permstat 21101
Attaching to process ID 21101, please wait...
Debugger attached successfully.
Server compiler detected.
JVM version is 24.51-b03
finding class loader instances ..sun.jvm.hotspot.debugger.UnmappedAddressException: cb953000
at sun.jvm.hotspot.debugger.PageCache.checkPage(PageCache.java:208)
at sun.jvm.hotspot.debugger.PageCache.getData(PageCache.java:63)
at sun.jvm.hotspot.debugger.DebuggerBase.readBytes(DebuggerBase.java:217)
at sun.jvm.hotspot.debugger.linux.LinuxDebuggerLocal.readCInteger(LinuxDebuggerLocal.java:482)
I hereby claim:
To claim this, I am signing this object: