View slowlog_new_output.txt
127.0.0.1:6379> client setname worker-502
OK
127.0.0.1:6379> debug sleep 1
OK
(1.00s)
127.0.0.1:6379> slowlog get
1) 1) (integer) 0
2) (integer) 1497523890
3) (integer) 1002448
4) 1) "debug"
View test_64.c
#include <stdint.h>
#include <pthread.h>
#include <stdio.h>
volatile uint64_t x = 0;
void *writeThread(void *arg) {
    for (uint64_t i = 0; i < 10000000; i++) {
        while(!__sync_bool_compare_and_swap(&x,x,i+(i<<32))); // x = i+(i<<32)
    }
    return NULL;
}
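
/* The file is truncated above; what follows is only a hypothetical sketch
 * of how such a test is typically completed: a reader repeatedly loads x
 * and checks that its two 32-bit halves match, since the writer only ever
 * stores values whose halves are equal. A mismatch would reveal a torn
 * (non-atomic) 64-bit load or store. */
int main(void) {
    pthread_t t;
    pthread_create(&t,NULL,writeThread,NULL);
    for (uint64_t j = 0; j < 10000000; j++) {
        uint64_t v = x;
        if ((v & 0xffffffffULL) != (v >> 32))
            printf("Torn value observed: %llx\n", (unsigned long long)v);
    }
    pthread_join(t,NULL);
    return 0;
}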
View background_keys.c
/* The thread entry point that actually executes the blocking part
* of the command HELLO.KEYS.
*
* Note: this implementation is very simple on purpose, so no duplicated
 * keys (returned by SCAN) are filtered. However, adding such functionality
 * would be trivial by using any data structure implementing a dictionary
 * in order to filter out the duplicated items. */
void *HelloKeys_ThreadMain(void *arg) {
    RedisModuleBlockedClient *bc = arg;
    RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bc);
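
    /* The file is truncated above. The rest of this function is only a
     * simplified, hypothetical sketch (not the real HELLO.KEYS code) of how
     * a blocked-client thread typically completes: lock the thread safe
     * context, run a command, produce the reply, then unblock the client. */
    RedisModule_ThreadSafeContextLock(ctx);
    RedisModuleCallReply *reply = RedisModule_Call(ctx,"SCAN","l",(long long)0);
    RedisModule_ThreadSafeContextUnlock(ctx);

    /* For brevity the raw SCAN reply (cursor included) is forwarded as-is;
     * the real command would loop on the cursor and reply with keys only. */
    RedisModule_ReplyWithCallReply(ctx,reply);
    RedisModule_FreeCallReply(reply);

    RedisModule_FreeThreadSafeContext(ctx);
    RedisModule_UnblockClient(bc,NULL);
    return NULL;
}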
View lmdb.tcl
# LVDB - LLOOGG Memory DB
# Copyright (C) 2009 Salvatore Sanfilippo <antirez@gmail.com>
# All Rights Reserved
# TODO
# - cron with cleanup of timed out clients, automatic dump
# - the dump should use array startsearch to write it line by line
# and may just use gets to read element by element and load the whole state.
# - 'help','stopserver','saveandstopserver','save','load','reset','keys' commands.
# - ttl with milliseconds resolution 'ttl a 1000'. Check ttl in dump!
View Scaletta.md
  • Overview of the data structures.
  • Persistence: fork() and copy on write.
  • Implementation of EXPIRE with sampling, and the approximated LRU algorithm.
  • Secondary indexes using sorted sets (including the LEX commands).
  • Geo indexing represented on top of sorted sets.
  • Mass import of data via redis-cli.
  • REPL protocol.
  • Lua scripting.
  • Low level implementation of the data structures: 1) Dual-ported implementation of the sorted sets (hash table + skiplist). 2) Hash table with incremental rehashing. 3) Quicklists: that is, Redis lists are implemented as linked lists of mega-blobs, each containing N objects.
  • Maxmemory and eviction policies when the memory limit is reached. LRU and LFU (see the sampling sketch after this list).
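
A minimal sketch in C of the sampling idea behind the approximated LRU and
the eviction policies mentioned above (hypothetical toy code, not the Redis
implementation): when memory must be reclaimed, a few random keys are
sampled and the least recently accessed one in the sample is evicted, so no
global LRU ordering has to be maintained.

#include <stdio.h>
#include <stdlib.h>

#define NUM_KEYS 16
#define SAMPLES 5

typedef struct { long last_access; int valid; } mockkey;

int main(void) {
    mockkey keys[NUM_KEYS];
    /* Fake keyspace: pretend key i was last touched at time (i*7) % 100. */
    for (int i = 0; i < NUM_KEYS; i++) {
        keys[i].last_access = (i*7) % 100;
        keys[i].valid = 1;
    }
    /* Sample SAMPLES random keys and evict the oldest one in the sample. */
    srand(42);
    int victim = -1;
    for (int i = 0; i < SAMPLES; i++) {
        int j = rand() % NUM_KEYS;
        if (!keys[j].valid) continue;
        if (victim == -1 || keys[j].last_access < keys[victim].last_access)
            victim = j;
    }
    if (victim != -1) {
        keys[victim].valid = 0;
        printf("Evicted key %d (last access at time %ld)\n",
            victim, keys[victim].last_access);
    }
    return 0;
}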
View rax_iterator_api.c
void somefunc(rax *mytree) {
    raxIterator iter;
    raxStart(&iter,mytree);
    /* As seek operator you can use >, <, >=, <=, == */
    raxSeek(&iter,(unsigned char*)"chromo",6,"<="); /* Seek key <= "chromo" */
    while(raxNext(&iter,NULL,0,NULL)) { /* or raxPrev() */
        printf("Current key: %.*s, val %p\n",
            (int)iter.key_len, (char*)iter.key, iter.data);
    }
    raxStop(&iter); /* Free the iterator internal resources. */
}
View prng_bleagh.c
/* MacOS libc PRNG is garbage!
* Test this on MacOS 10.12.3 (may be the same with other versions)
* You'll see three times the same output.
* Cheers by @antirez. */
#include <stdio.h>
#include <stdlib.h>
int main(void) {
    /* Note, 1000000, 1000001, 1000002 are not special. */
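
    /* The file is cut short above; the loop below is only a hypothetical
     * guess at how the demo continues: seed the libc PRNG with each of the
     * three nearby values and print the first value it returns. According
     * to the comment at the top, on the affected MacOS versions the three
     * outputs come out identical. */
    for (unsigned int seed = 1000000; seed <= 1000002; seed++) {
        srand(seed);
        printf("seed %u -> first rand() output: %d\n", seed, rand());
    }
    return 0;
}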
View radtree.md

Radix tree ASCII printing.

[r] -> [ou]
           `-(o) [m] -> [au]
                         `-(a) [n] -> [eu]
                                       `-(e) []=0x0
                                       `-(u) [s] -> []=0x1
                         `-(u) "lus" -> []=0x2
           `-(u) [b] -> [ei]
View patch.diff
diff --git a/src/evict.c b/src/evict.c
index 802997c..24fb283 100644
--- a/src/evict.c
+++ b/src/evict.c
@@ -336,11 +336,34 @@ unsigned long LFUDecrAndReturn(robj *o) {
* server when there is data to add in order to make space if needed.
* --------------------------------------------------------------------------*/
+/* We don't want to count AOF buffers and slaves output buffers as
+ * used memory: the eviction should use mostly data size. This function
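
The hunk above is cut short. As a sketch of the idea it introduces (written
from memory, not the literal patch), the eviction code can subtract replica
output buffers and AOF buffers from the used-memory figure before comparing
it with the maxmemory limit; identifier names follow the Redis source but
may differ in detail:

/* Return the amount of memory used by buffers we do not want to count
 * against maxmemory: replica output buffers and AOF buffers. */
size_t freeMemoryGetNotCountedMemory(void) {
    size_t overhead = 0;

    if (listLength(server.slaves)) {
        listIter li;
        listNode *ln;
        listRewind(server.slaves,&li);
        while((ln = listNext(&li)) != NULL) {
            client *slave = listNodeValue(ln);
            overhead += getClientOutputBufferMemoryUsage(slave);
        }
    }
    if (server.aof_state != AOF_OFF) {
        overhead += sdslen(server.aof_buf) + aofRewriteBufferSize();
    }
    return overhead;
}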
View 4.2-roadmap.md

Redis 4.2 roadmap

  1. Redis Cluster
  • Speed up key -> hashslot association. Now makes RDB loading 4x slower when there are many small keys.
  • Better multi data center story
  • redis-trib C coded and moved into redis-cli
  • Backup / Restore of Cluster
  • Non blocking MIGRATE (also consider not using 2X memory)
  • Faster resharding
  • Bug fixing and stress testing to bring it to the next level of maturity