View mongo-rocks-startup.log
./mongod --storageEngine=rocksdb
2015-04-03T19:09:38.983+0100 I STORAGE Compression: snappy
2015-04-03T19:09:38.983+0100 I STORAGE MaxWriteMBPerSec: 1024
2015-04-03T19:09:39.042+0100 I CONTROL [initandlisten] MongoDB starting : pid=16433 port=27017 dbpath=/data/db 64-bit host=fed-vm
2015-04-03T19:09:39.042+0100 I CONTROL [initandlisten]
2015-04-03T19:09:39.042+0100 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.1-pre-) of MongoDB.
2015-04-03T19:09:39.042+0100 I CONTROL [initandlisten] ** Not recommended for production.
2015-04-03T19:09:39.042+0100 I CONTROL [initandlisten]
2015-04-03T19:09:39.043+0100 I CONTROL [initandlisten]
2015-04-03T19:09:39.043+0100 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
View mm-standalone.conf
storage:
  dbPath: "/data/db/300-mm"
  directoryPerDB: true
  journal:
    enabled: true
systemLog:
  destination: file
  path: "/data/db/300-mm/mongodb.log"
  logAppend: true
  timeStampFormat: iso8601-utc
View oidtest.js
// start a mongo shell to act as a JS interpreter (no db connection required)
mongo --nodb
// store a sample id in a variable
var id = new ObjectId("533bc0f60015a0a814000001");
// print out the variable
> id
ObjectId("533bc0f60015a0a814000001")
// try some methods
id.getTimestamp()
ISODate("2014-04-02T07:49:10Z")
View timed_ex_explain.js
// NOTE: explain("executionStats") already reports its own timing, so this
// wall-clock measurement is redundant for explain itself. It exists so the
// output can be compared like-for-like with commands (such as touch) that
// print no timing information of their own.
var begin = Date.now();
db.data.find().explain("executionStats")
var elapsed = Date.now() - begin;
print("Time to touch data: " + elapsed + "ms");
View mongo_mmap.conf
storage:
  dbPath: "/ssd/db/mmap"
  engine: "mmapv1"
systemLog:
  destination: file
  path: "/data/mmap/mongodb.log"
processManagement:
  fork: true
View mongo_wt_none.conf
storage:
  dbPath: "/ssd/db/wt_none"
  engine: "wiredTiger"
  wiredTiger:
    collectionConfig: "block_compressor="
systemLog:
  destination: file
  path: "/data/wt_none/mongodb.log"
processManagement:
  fork: true
View mongo_wt_zlib.conf
storage:
  dbPath: "/ssd/db/wt_zlib"
  engine: "wiredTiger"
  wiredTiger:
    collectionConfig: "block_compressor=zlib"
systemLog:
  destination: file
  path: "/data/wt_zlib/mongodb.log"
processManagement:
  fork: true
View mongo_wt_snappy.conf
storage:
  dbPath: "/ssd/db/wt_snappy"
  engine: "wiredTiger"
systemLog:
  destination: file
  path: "/data/wt_snappy/mongodb.log"
processManagement:
  fork: true
View compress_test.js
// these docs, in 2.6, get bucketed into the 256 bucket (size without header = 240) | |
// From Object.bsonsize(db.data.findOne()), the size is actually 198 for reference, so add 16 to that for an exact fit | |
// with that doc size, 80,000 is a nice round number under the 16MiB limit, so will use that for the inner loop | |
// We are shooting for ~16 GiB of data, without indexes, so do 1,024 iterations (512 from each client) | |
// This will mean being a little short (~500MiB) in terms of target data size, but keeps things simple | |
for(var j = 0; j < 512; j++){ // | |
bigDoc = []; | |
for(var i = 0; i < 80000; i++){ |
View openELEC.sh
#!/bin/bash
#
# author: Tim "xGhOsTkiLLeRx" Brust
# license: CC BY-NC-SA 4.0
# version: 0.2
# date: 07/10/2014
# description: replace (root) password of squashfs from openELEC
# usage: ./openELEC [password] [device] [hash] [user]
# dependencies: mkpasswd (whois), squashfs-tools
#
NewerOlder