- Starting: https://github.com/basho/riak_kv/blob/1.4.2/src/riak_kv_wm_object.erl#L619
- We create a new
riak_object
and populate the various fields with the headers and metadata supplied by the client. - Big surprise: we eventually call
riak_client:put
: https://github.com/basho/riak_kv/blob/1.4.2/src/riak_client.erl#L143 - If/when the client returns any errors these are handled in
handle_common_errors
and it is nice to return human readable errors to client :)
# Map Reduce Delete Instructions
Set allow_strfun true on all nodes:
echo "rpc:multicall([node() | nodes()], application,set_env,[riak_kv,allow_strfun,true])." | bin/riak attach
Insert 15000 keys:
for i in {1..15000}; do curl -XPUT http://127.0.0.1:9000/buckets/test/keys/test$i -H "content-type: text/plain" -d "Data #$i\n"; done
# Riak Multi DC Repl Cheat Sheet
## Types
There are two types of multi data center replication in Riak.
-
Fullsync
Operation is triggered by connection creation between clusters, running
riak-repl start-fullsync
on the listener leader, or every fullsync_interval minutes. Relevant app.config settings:
Entry point for all object operations: https://github.com/basho/riak_kv/blob/1.4.2/src/riak_kv_wm_object.erl
delete_resource/2 takes RequestData(Request header, ex: vclock) and Context(Record containing: Bucket, Key, Client): https://github.com/basho/riak_kv/blob/1.4.2/src/riak_kv_wm_object.erl#L888
# How To: Clone A Cluster
## Summary
This gist will walk through the procedure of altering a secondary cluster using the ring configuration of a primary cluster. This configuration allows partitions to be transferred between clusters using any file transfer utility.
## Restrictions
-
Supported Riak Versions: 1.2
-
Both clusters must have same ring size and node count.
-module(riak_metrics). | |
-compile(export_all). | |
main([NodeName0, Cookie, Length, Command]) -> | |
LocalName = 'riak_metrics@127.0.0.1', | |
NodeName = list_to_atom(NodeName0), | |
case net_kernel:start([LocalName]) of | |
{ok, _} -> | |
erlang:set_cookie(node(), list_to_atom(Cookie)), | |
case net_kernel:hidden_connect_node(NodeName) of |
-module(heap_query). | |
-compile(export_all). | |
main([NodeName0, Cookie]) -> | |
Name = 'heap_query@127.0.0.1', | |
NodeName = list_to_atom(NodeName0), | |
case net_kernel:start([Name]) of | |
{ok, _} -> | |
erlang:set_cookie(node(), list_to_atom(Cookie)), | |
case net_kernel:hidden_connect_node(NodeName) of |
/Users/bsparrow/Riak_git/otp/erts/etc/common/heart.c: | |
499 } | |
500 } else if (tlen == 0) { | |
501: /* Erlang has closed its end */ | |
502: print_error("Erlang has closed."); | |
503 return R_CLOSED; | |
504 } | |
/Users/bsparrow/Riak_git/otp/erts/etc/common/inet_gethost.c: | |
541 return 1; |
-module(repair). | |
-compile(export_all). | |
main([Dir]) -> | |
Opts = [{max_open_files, 2000}, | |
{use_bloomfilter, true}, | |
{write_buffer_size, 45 * 1024 * 1024}, | |
{compression,false}], | |
{Time,_} = timer:tc(eleveldb,repair,[Dir, Opts]), |
Bucket = <<"d">>, | |
Key = <<"e0c97a4cde6c">>, | |
BKey = {Bucket,Key}, | |
{ok, Ring} = riak_core_ring_manager:get_my_ring(), | |
DocIdx = riak_core_util:chash_key(BKey), | |
BucketProps = riak_core_bucket:get_bucket(Bucket, Ring), | |
[NValue] = [Y || {X1, Y} <- BucketProps, n_val == X1], | |
UpNodes = riak_core_node_watcher:nodes(riak_kv), | |
Preflist2 = riak_core_apl:get_apl_ann(DocIdx, NValue, Ring, UpNodes), | |
Preflist = [{IndexNode, Type} || {IndexNode, Type} <- Preflist2]. |