# Set up some variables first.
# (This line was bare prose before — shell would have tried to execute `Setup`.)
export PROJECT_ID="$(gcloud config get-value project)"  # active gcloud project
export REGION=europe-west3    # should be the same region as your MIG
export MIG_NAME=service-x-mig # or whatever MIG name you already have
# Step 1: Use the kasten UI to create a role (<role>) | |
# Create service account for those who needs access | |
kubectl create serviceaccount <user> --namespace kasten-io | |
# For the namespace that the user needs access to: | |
kubectl create rolebinding backup_manager1 --namespace <namespace> \ | |
--clusterrole=<role> \ | |
--serviceaccount=kasten-io:<user> | |
# THIS IS A VERY HACKY AD-HOC SOLUTION TO BALANCE NODES BASED ON MEMORY USAGE ACROSS THE CLUSTER
import subprocess
import json
import time


def execute_kubectl_command(command):
    """Execute a kubectl command and return its stdout.

    Args:
        command: The kubectl arguments as a single space-separated string.
            NOTE(review): the naive ``split()`` means arguments containing
            spaces are not supported — pass a list-building wrapper if needed.

    Returns:
        The command's captured stdout as a string. Errors are not raised
        here; ``stderr`` is captured but discarded, so callers should
        validate the returned output.
    """
    full_command = ["kubectl"] + command.split()
    result = subprocess.run(full_command, capture_output=True, text=True)
    # Fix: the docstring promised "returns the output", but the original
    # body never returned anything (the function yielded None).
    return result.stdout
#!/bin/bash | |
fix_rgb() { | |
fixes=0 | |
dryrun=false | |
if [ ! -t 0 ]; then | |
read -p "Do you want to proceed with fixing? (y/n): " answer | |
if [ "$answer" = "n" ]; then | |
dryrun=true |
{ | |
init: function(elevators, floors) { | |
// helper function to check if a floor is already queued | |
function isFloorQueued(elevator, floorNum) { | |
return elevator.destinationQueue.indexOf(floorNum) > -1; | |
} | |
function isFloorQueuedForAnyElevator(floorNum) { | |
for (let e = 0; e < elevators.length; e++) { |
WITH p AS ( | |
WITH q AS ( | |
WITH y AS ( | |
WITH x as ( | |
SELECT | |
sm.entity_id, | |
s.state, | |
s.last_updated_ts, | |
COALESCE(LAG(s.state) OVER (partition by sm.entity_id ORDER BY last_updated_ts ASC), s.state) as last_state | |
FROM states s |
# BEGIN: 8f7d6h3j4k5l | |
import unittest | |
import requests | |
import jwt | |
import time | |
class TestAPI(unittest.TestCase): | |
token = None | |
api_basepath = "https://api-test.xl-byg.dk" |
from zeroconf import ServiceBrowser, ServiceListener, Zeroconf, ZeroconfServiceTypes | |
class MyListener(ServiceListener): | |
def update_service(self, zc: Zeroconf, type_: str, name: str) -> None: | |
info = zc.get_service_info(type_, name) | |
print("Updated: \t", format_info(info)) | |
def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None: |
//Name
=IFNA(QUERY('Jira sync'!A:O, "select O where B = '"& parent & "' AND A = 'Epic' LIMIT 1",0), "")
//Project
=IFNA(QUERY('Jira sync'!A:C, "select C where B = '"& parent & "' AND A = 'Epic' LIMIT 1",0), 0)
//Risk
=IFNA(QUERY('Jira sync'!A:M, "select M where B = '"& parent & "' AND A = 'Epic' LIMIT 1",0), 0)
//Risk Reasoning
2021.06.24 08:39:52.882235 [ 49925 ] {} <Debug> MergeTreeSequentialSource: Reading 2 marks from part 202106_330750_330750_0, total 54 rows starting from the beginning of the part | |
2021.06.24 08:39:52.893411 [ 49925 ] {} <Debug> analytics.events_local (8fcee572-2b74-42f2-8fce-e5722b74c2f2) (MergerMutator): Merge sorted 2913 rows, containing 46 columns (46 merged, 0 gathered) in 0.012852513 sec., 226648.28271327174 rows/sec., 94.10 MiB/sec. | |
2021.06.24 08:39:52.914845 [ 49925 ] {} <Trace> analytics.events_local (8fcee572-2b74-42f2-8fce-e5722b74c2f2): Renaming temporary part tmp_merge_202106_330565_330750_17 to 202106_330565_330750_17. | |
2021.06.24 08:39:52.914934 [ 49925 ] {} <Trace> analytics.events_local (8fcee572-2b74-42f2-8fce-e5722b74c2f2) (MergerMutator): Merged 5 parts: from 202106_330565_330743_16 to 202106_330750_330750_0 | |
2021.06.24 08:39:52.918118 [ 49925 ] {} <Debug> MemoryTracker: Peak memory usage: 12.40 MiB. | |
2021.06.24 08:39:53.272905 [ 49929 ] {} <Trace> system.metric_log (22746af7-8e26-4b0f-a274-6af |