I hereby claim:
- I am kiwimato on github.
- I am maihai (https://keybase.io/maihai) on keybase.
- I have a public key ASDUBhsQJr57e0cpoDmBuuPWOF7QTWq1XJACDie8SbHpBwo
To claim this, I am signing this object:
#!/bin/bash
# Converts DB files created for NSS when upgrading from RHEL6 to RHEL7 or RHEL8.
# The problem only occurs on upgrades from RHEL6.
# Fixes entries where the group name is not properly written to the file, e.g.:
# :john john 2001,2500,4
# First, copy the old file to a new file
cp group.tdb group_test.tdb
for line in $(grep '^=' group.tdb | grep ':$' | awk '{print $2}'); do
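  # Hypothetical loop body: prepend the missing group name to broken entries
  # such as ':john john 2001,2500,4'. The sed expression below is an
  # illustration only, not taken from the original script.
  name="${line%%:*}"
  sed -i "s|^:${name}|${name}:${name}|" group_test.tdb
done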
# I made this script because CodeShip for some reason doesn't offer an option to
# download logs, so any log bigger than your browser can handle makes it hang.
# You only need jq installed for this script to work.
# How-to:
# Open the Developer console, watch the Network tab when you click a log, and you
# will see a request named 'jet_log?start_index=' <- just right-click it,
# Copy => Copy as cURL.
# Then execute that command and pipe it into a file, or into the following pipeline:
# jq -c '.service_log[].payload' | tr -d '"' | base64 -d
# Append the function below to your .bashrc to use it at the end of the cURL
# command with a pipe, like so:
# <super long cURL command> | codeship-extract
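# A minimal sketch of the codeship-extract helper described above; the name
# comes from the notes, and the body is just the jq/base64 pipeline they give.
function codeship-extract {
  jq -c '.service_log[].payload' | tr -d '"' | base64 -d
}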
import serial, time, os, traceback
from influxdb import InfluxDBClient

# InfluxDB connection details, read from the environment with fallbacks
dbname = os.getenv('INFLUXDB_DATABASE', 'airquality')
username = os.getenv('INFLUXDB_USER')
password = os.getenv('INFLUXDB_PASSWORD')
host = os.getenv('INFLUXDB_HOST', '192.168.100.21')
port = int(os.getenv('INFLUXDB_PORT', 8086))  # getenv returns a string; the client needs an int

# Connect using the values above
client = InfluxDBClient(host=host, port=port, username=username,
                        password=password, database=dbname)
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
    . /etc/bashrc
fi
# User specific environment
PATH="$HOME/.local/bin:$HOME/bin:$PATH"
export PATH
#!/usr/bin/env bash
# Usage: ./find-ecr-image.sh 1234567890.dkr.ecr.eu-west-1.amazonaws.com/supersecret:tag
REGISTRY_ID="${1%%.*}"
IMAGE_TAG="${1##*:}"
REPOSITORY="$(echo "${1%%:*}" | cut -d / -f 2,3)"
if [[ -z "$REGISTRY_ID" ]] || [[ -z "$IMAGE_TAG" ]] || [[ -z "$REPOSITORY" ]]; then
  echo "Usage: $(basename "$0") url:tag"
  echo "Example: $(basename "$0") NNNNNNNNN.dkr.ecr.eu-west-1.amazonaws.com/something:master"
  exit 1
fi
#!/usr/bin/env bash
# Checks if a Docker image tag exists in AWS ECR, and returns a non-zero exit code if it doesn't.
REGISTRY_ID="${1%%.*}"
IMAGE_TAG="${1##*:}"
REPOSITORY="$(echo "${1%%:*}" | cut -d / -f 2,3)"
if [[ -z "$REGISTRY_ID" ]] || [[ -z "$IMAGE_TAG" ]] || [[ -z "$REPOSITORY" ]]; then
  echo "Usage: $(basename "$0") url:tag"
  echo "Example: $(basename "$0") NNNNNNNNN.dkr.ecr.eu-west-1.amazonaws.com/repo-something:latest"
  exit 1
fi
#!/bin/bash
# Prints the approximate size of each S3 bucket in a list.
# Execute as:
# ./s3_buckets_size.sh list.txt
# where list.txt is a list of bucket names, one per line
for bucket in $(cat "$1"); do
  SIZE=$(aws cloudwatch get-metric-statistics --namespace AWS/S3 --start-time $(date +%Y-%m-%dT%H:%M:%S -d "00:00 last week") --end-time $(date +%Y-%m-%dT%H:%M:%S) --period 86400 --statistics Average --region eu-west-1 --metric-name BucketSizeBytes --dimensions Name=BucketName,Value="$bucket" Name=StorageType,Value=StandardStorage | jq '.Datapoints[].Average' | tail -1)
  echo "$bucket" $(numfmt --to iec --format "%8.1f" ${SIZE:-"0"})
done
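# One hedged way to generate list.txt (assumes the AWS CLI is configured with
# credentials that can list buckets):
aws s3api list-buckets --query 'Buckets[].Name' --output text | tr '\t' '\n' > list.txt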
#!/bin/bash
# Limit the GPU power draw to 115W
nvidia-smi -pl 115

# True if any running compute app matches the given name
function check_if_active {
  nvidia-smi --query-compute-apps=name --format=csv,noheader | grep -q "$1"
}

while : ; do
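  # A hedged sketch of the loop body: poll for the named compute app and
  # re-apply the cap while it runs. Both "$1" (the app name to watch) and the
  # 60-second poll interval are assumptions, not taken from the original.
  if check_if_active "$1"; then
    nvidia-smi -pl 115
  fi
  sleep 60
done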
#!/bin/bash
# VMware Tools fixer
# For when VMware Tools refuses to start
vmware-guestproxycerttool -g -f &&
/usr/bin/vmware-config-tools.pl -d &&
/etc/vmware-tools/services.sh restart

# Fix an automated tools install
mkdir -p /media/cdrom &&
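# A plausible continuation for the automated install fix (assumptions: the
# Tools virtual CD is attached and the standard VMware installer paths apply):
mount /dev/cdrom /media/cdrom &&
tar -xzf /media/cdrom/VMwareTools-*.tar.gz -C /tmp &&
/tmp/vmware-tools-distrib/vmware-install.pl -d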