A function compare_json_data(source_data_a, source_data_b) that accepts
structures populated with data loaded from json.load() and compares them
for equality.
$ ./compare.py
Compare JSON result is: True
// A list of useful queries for profiler analysis, starting with the most basic.
// 2.4 compatible.
//
// Output explained (sample profiler document, truncated):
//
{
    "ts" : ISODate("2012-09-14T16:34:00.010Z"), // date it occurred
    "op" : "query",                             // the operation type
    "ns" : "game.players",                      // the db and collection
/* MongoDB cheat sheet */

// Replication lag via serverStatus(); repl : 2 requests the extended
// replication section. (Fixed: the original had `repl ; 2` — a semicolon
// where a colon belongs, which is a shell syntax error.)
db._adminCommand( { serverStatus : 1 , repl : 2 } )

// Database-level and collection-level storage statistics.
db.stats()
db.foo.stats()

// size of BSON of some query
A function compare_json_data(source_data_a, source_data_b) that accepts
structures populated with data loaded from json.load() and compares them
for equality.
$ ./compare.py
Compare JSON result is: True
-- PostgreSQL cheat sheet
-- Postgres is set up to use local.6, which is syslog'd to sflog001.
-- The slow query log is /var/log/localmessages.
-- Config files are always in /data/friend/*.conf.
-- Vacuums are set via cfengine; we use both manual and auto. Vacuum/analyze helps with frozen XIDs being recouped, so transaction IDs do not exceed ~2 billion, which would cause a massive shutdown/reset. Fix it by exporting/importing high-TX tables.
-- To log into psql: psql -U postgres -d <DB> (usually friend)
{"userid": 100, "amount": 10000, "timestamp": 1618852483}
{"userid": 200, "amount": 23000, "timestamp": 1618852483}
{"userid": 100, "amount": 4000, "timestamp": 1618848883}
{"userid": 200, "amount": 300, "timestamp": 1618848883}
-- Per-user payment totals over a 1-hour tumbling window on mystream.
SELECT
    userid,
    SUM(amount) AS theamount,
    TUMBLE_END(timestamp, INTERVAL '1' HOUR) AS ending_ts
FROM mystream
GROUP BY userid, TUMBLE(timestamp, INTERVAL '1' HOUR)
{"userid": 100, "theamount": 14000, "ending_ts": 1618848883}
{"userid": 200, "theamount": 23300, "ending_ts": 1618848883}
// Return the classic greeting string.
function HELLOWORLD() {
    const greeting = "Hello World";
    return greeting;
}

HELLOWORLD();
-- Total payment volume aggregated over a 1-hour tumbling window,
-- with the window-end timestamp cast to text for readability.
SELECT
    SUM(CAST(amount AS numeric)) AS payment_volume,
    CAST(TUMBLE_END(eventTimestamp, INTERVAL '1' HOUR) AS varchar) AS ts
FROM payments
GROUP BY TUMBLE(eventTimestamp, INTERVAL '1' HOUR);
-- eventTimestamp is the Kafka record timestamp as a Unix timestamp,
-- magically added to every schema.
SELECT MAX(eventTimestamp) FROM solar_inputs;

-- Make it human readable by casting to text.
SELECT CAST(MAX(eventTimestamp) AS varchar) AS TS FROM solar_inputs;

-- Date math with an interval: rows from the last 10 seconds.
SELECT *
FROM payments
WHERE eventTimestamp > CURRENT_TIMESTAMP - INTERVAL '10' SECOND;