Skip to content

Instantly share code, notes, and snippets.

@davaymne
davaymne / Grafana - Main Page
Last active January 12, 2021 11:36
Grafana - Main Page
{
"__inputs": [
{
"name": "DS_GRAPHQL_DATA SOURCE",
"label": "GraphQL Data Source",
"description": "",
"type": "datasource",
"pluginId": "fifemon-graphql-datasource",
"pluginName": "GraphQL Data Source"
},
#!/bin/bash
# Log may contain different subgraphs - filter subgraphs manually first of all
# Example input log name: query.20201126_44.log
GRAPHS_LIST="<PATH>/all-graphs-list.csv"
# GNU date only: --date="yesterday" gives the previous day as YYYYMMDD.
TIMESTAMP=$(date --date="yesterday" +%Y%m%d)
#NAME_BASE=$(echo $1 | cut -d'.' -f 1,2)
NAME_BASE="query.${TIMESTAMP}"
# Glob matching all of yesterday's query-node log chunks.
LOGS="/fluentd/log/query-node/${NAME_BASE}*"
# Destination for the aggregated per-day summary (JSON Lines).
SUMMARY_JSONL="/fluentd/log/summary/${NAME_BASE}.summary.jsonl"
# Script to convert ipfs to b58decode, eg QmTN6gMCBCYTgbis33eSvr6sLsQoQ3R6a2KzR7CqmMABQP -> '0xdfffeffgkjfijfoiqfioqmfqiomfdfffeffgkjfijfoiqfioqmfqiomf
# Require: file with list of ipfs: each row is one ipfs
# Usage: python3 convert-ipfs-to-bytes32.py file.csv
import binascii
import base58
import sys
# NOTE(review): indentation was lost in this paste and the function is
# truncated here; the two lines below are the start of the function body.
def ipfs_b32(ipfs):
output = base58.b58decode(ipfs)  # decode base58 IPFS hash -> raw bytes
output = output[2:]  # drop the first two bytes (multihash prefix, presumably) -- TODO confirm
#!/bin/bash
# Log may contain different subgraphs - filter subgraphs manually first of all
# Derive all output file names from the input log name ($1),
# e.g. query.20201126.log -> query-summary.jsonl / query.jsonl / query.treebuf
NAME_BASE=$(echo $1 | cut -d'.' -f 1)
SUMMARY_JSONL="${NAME_BASE}-summary.jsonl"
OUTPUT_JSONL="${NAME_BASE}.jsonl"
TREEBUF="${NAME_BASE}.treebuf"
# Path to the compiled agora cost-model tool (release build).
AGORA=<PATH>agora1/agora/target/release/agora
echo "Converting log to jsonl $1"
# Pads every line of the input file (sys.argv[1]) to the length of the
# longest line, lowercasing the first character of each line.
import sys
# Pass 1: find the maximum line length n (len(line) includes the trailing newline).
with open(sys.argv[1], 'r') as f:
n = 0
for line in f:
if len(line) > n:
n = len(line)
# Pass 2: lowercase the first char, strip surrounding whitespace,
# right-pad with spaces to width n.
with open(sys.argv[1], 'r') as f:
for line in f:
new = '{}{}'.format(line[0].lower(), line[1:]).strip().ljust(n)
# NOTE(review): indentation was lost in this paste and the script is
# truncated here -- the computed `new` is never written out in this view.
#!/bin/bash
# Bulk-apply an indexer allocation rule to every deployment id listed in a file.
# Usage: ./script.sh <ids-file> <allocation-amount>
#   $1 - file with one deployment id per line
#   $2 - allocationAmount to set for each id
# Read line by line instead of word-splitting $(cat $1), and quote every
# expansion so ids and amounts survive globbing/splitting intact.
# The `|| [[ -n "$id" ]]` also processes a final line with no trailing newline.
while IFS= read -r id || [[ -n "$id" ]]; do
  echo "Allocate $id, Amount $2"
  graph indexer rules set "$id" allocationAmount "$2" parallelAllocations 1 decisionBasis always
done < "$1"
# Script for bulk costmodel provisioning.
# Require: tsv file (fields tab separated)
# File format: 4 fields: name, id, full path to costmodel, variable string
# How to use: python3 set-bulk-costmodel.py my-costmodels.tsv
import sys
import subprocess
with open(sys.argv[1], 'r') as f:
for line in f:
print(line)
# Each row: deployment name, id, costmodel file path, variables string.
# Note: no strip(), so `variables` keeps the trailing newline.
name, id, model, variables = line.split('\t')
# NOTE(review): indentation was lost in this paste and the script is
# truncated here (the subprocess call presumably follows).
# Requires argument tsv file: b32 id name allocation
# Bulk-disable allocations: for every row, mark the deployment id "never".
import sys
import subprocess
with open(sys.argv[1], 'r') as f:
for line in f:
print(line)
# Each row: bytes32 hash, deployment id, name, allocation amount.
b32, id, name, allocation = line.strip().split('\t')
#print(id, model)
# graph indexer rules never $id
# Build the CLI invocation; presumably run via subprocess further down (truncated).
cmd = ['graph', 'indexer', 'rules', 'never', id]
# Consumes json summary file ($1: JSON Lines with .subgraph and .calls fields)
# output call count per subgraph: one "subgraph, calls, date" row per line,
# written to subgraph_daily_summary.csv
summaries=""
day=2020115   # NOTE(review): 7 digits -- looks like a truncated YYYYMMDD date; confirm
# Slurp all records, group by subgraph, sum .calls per group, tag rows with $day.
day_summary=$(jq -s 'group_by(.subgraph) |
map({"subgraph":(.[0].subgraph), "calls":(reduce .[].calls as $calls (0; . + $calls)), "date":("'$day'") }) |
map([.subgraph, .calls, .date] | join(", ")) | join(" \r\n ")' < "$1")
# Strip the double quotes jq puts around its string result.
summaries+="${day_summary//\"} \r\n"
# %b expands the \r\n escapes (matching the old bare-format-string behavior)
# while keeping any % characters in the data from being treated as
# printf format directives.
printf '%b' "${summaries//\"}" > subgraph_daily_summary.csv
{
"__inputs": [
{
"name": "DS_GRAPHQL_DATA SOURCE",
"label": "GraphQL Data Source",
"description": "",
"type": "datasource",
"pluginId": "fifemon-graphql-datasource",
"pluginName": "GraphQL Data Source"
},