sudo ss -lptn 'sport = :9000'
ps -p 1366 -o comm=
source './.env.sample'
export $(cut -d= -f1 './.env.sample')
Or
set -a; source './.env.sample'; set +a
To strip leading/trailing double quotes from each line (e.g. around quoted values):
sed -e 's/^"//' -e 's/"$//'
npm run build:client:watch > ~/build_client_watch.log 2>&1 &
npm run build:server:watch > ~/build_server_watch.log 2>&1 &
npm run start:server:watch > ~/start_server_watch.log 2>&1 &
aws eks --region us-east-1 update-kubeconfig --name eks-prod-default
kubectl label node <node name> node-role.kubernetes.io/<role>=<role>
Example:
kubectl label node ip-10-10-0-47.ec2.internal node-role.kubernetes.io/main=
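To verify the label took effect (the ROLES column is derived from node-role.kubernetes.io/* labels):
kubectl get nodes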
kubectl get all --field-selector='spec.nodeName=ip-10-10-0-76.ec2.internal'
aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin <AWS-Account-ID>.dkr.ecr.us-east-1.amazonaws.com
docker run -p 80:80 \
-e 'PGADMIN_DEFAULT_EMAIL=username' \
-e 'PGADMIN_DEFAULT_PASSWORD=pass' \
-d dpage/pgadmin4
docker run -d --hostname localhost --name some-rabbit -p 5672:5672 -e RABBITMQ_DEFAULT_USER=admin -e RABBITMQ_DEFAULT_PASS="password" rabbitmq:3-management
kubectl patch cronjob.batch/your-cronjob-name -p '{"spec":{"suspend":true}}'
sort duplicate-file.txt | uniq > unique-file.txt
docker buildx build . --platform linux/amd64 -t keycloak-bcrypt:amd64
To use --tunnel-through-iap, the caller needs the IAP-secured Tunnel User role (roles/iap.tunnelResourceAccessor) granted in IAM.
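For example, granting it to a user on the project used below (the user here is a placeholder):
gcloud projects add-iam-policy-binding prj-test-eng-ab98 --member='user:user@email.com' --role='roles/iap.tunnelResourceAccessor'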
gcloud compute ssh --zone "us-central1-a" "vm-instance-name" --project "prj-test-eng-ab98" --tunnel-through-iap
SELECT *
FROM pg_roles
WHERE oid IN (
    SELECT roleid
    FROM pg_auth_members
    WHERE member = (SELECT oid FROM pg_roles WHERE rolname = 'replicate_prod_master')
);
gcloud config get project
gcloud config set project <project-id>
kubectl get deployments -o=jsonpath='{range .items[?(@.spec.replicas > 0)]}{.metadata.name}{"\n"}{end}'
gsutil iam ch serviceAccount:service-account-name@project-id.iam.gserviceaccount.com:roles/storage.admin gs://somebucket
gcloud kms keys add-iam-policy-binding vault-kms --location global --keyring vault-auto-unseal-kr --member serviceAccount:some-sa@project-id.iam.gserviceaccount.com --role roles/cloudkms.admin
nohup bash backup-data-script.sh >> payments-logs.log 2>&1 &
- nohup: "no hangup" keeps the process running even if the terminal/session that launched it exits (SIGHUP is ignored)
- >> : appends stdout to the log file
- 2>&1: redirects stderr to stdout so errors are logged too
- & : runs the command in the background (ways to check on it are shown below)
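To check on the job afterwards, for example:
jobs -l                           # only from the shell that started it
pgrep -af backup-data-script.sh   # from any shell
tail -f payments-logs.log         # follow the combined output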
aws route53 list-resource-record-sets --hosted-zone-id "ZU8UWIY5O0XYZ" | jq -r '.ResourceRecordSets[] | [.Name, .Type, (.ResourceRecords[]? | .Value), .AliasTarget.DNSName?] | @csv'
cli53 export --full ZU8UWIY5O0XYZ > output
gcloud dns record-sets import --zone-file-format input-file -z="target-zone-on-gcp" --delete-all-existing
gcloud dns record-sets export target-file.txt -z=zone-on-gcp
gcloud dns managed-zones create --dns-name=yourzone.com \
--description='description' yourzone-com
sudo tar --exclude='*/node_modules' --exclude='*/__pycache__' --exclude='*/.next' -zcvf target.tgz source.dir
aws ec2 attach-volume --region=us-east-1 --volume-id=vol-0449c1beee1eb8b31 --instance-id=i-02ed5c770220e38ee --device=/dev/sdk
The following mounts /dev/sda1 read-only onto /media/2tb:
sudo mkdir /media/2tb
sudo mount -o ro /dev/sda1 /media/2tb
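To confirm the mount, e.g.:
findmnt /media/2tb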
sudo scp -i ~/.ssh/ssh-private-key source/path ubuntu@x.x.x.x:/home/ubuntu/targetDir
gcloud compute instances add-iam-policy-binding instance-name-here \
--member='user:user@email.com' \
--role='roles/compute.admin' \
--zone="us-west1-b"
{ "Contents":
[
{
"Key" "s3 key",
"RestoreStatus: {...}
},
{
"Key" "s3 key",
"RestoreStatus: {...}
}
]
}
aws s3api list-objects --optional-object-attributes RestoreStatus --bucket aqapop | jq -r '.Contents | map(if has("RestoreStatus") then "already restored:"+.Key else .Key end) | .[]'
jq -r '.Contents | map(if has("RestoreStatus") then "already restored:"+.Key else .Key end) | .[]'
- -r: outputs raw strings rather than JSON-quoted values
- .Contents returns the Contents array (which can be mapped)
- has("RestoreStatus") checks if that key exists on the object
- .[] iterates over the array, printing each value on its own line, i.e. without the '[]' of a JSON list (example below)
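Run against a minimal made-up listing:
echo '{"Contents":[{"Key":"a.txt"},{"Key":"b.txt","RestoreStatus":{}}]}' | jq -r '.Contents | map(if has("RestoreStatus") then "already restored:"+.Key else .Key end) | .[]'
which prints:
a.txt
already restored:b.txt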
sed -e 's!gs://aqapop/!!' gcp-aqapop-list.txt > gcp-aqapop-list-cleaned.txt
Here we are removing gs://aqapop/ from every line of the file
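For example:
echo 'gs://aqapop/path/to/object' | sed -e 's!gs://aqapop/!!'
which prints path/to/object.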