# Download clone.c.
# NOTE(review): goo.gl short links were shut down by Google — replace with the
# full destination URL when known.
# -f: fail on HTTP errors instead of silently saving an error page as clone.c.
# Explicit https:// scheme: without it curl defaults to plain http.
curl -sSL -f -o clone.c https://goo.gl/G45N5X
# on host
{
  "workbench.colorCustomizations": {
    "terminal.foreground": "#839496",
    "terminal.background": "#002833",
    "terminal.ansiBlack": "#003541",
    "terminal.ansiBlue": "#268bd2",
    "terminal.ansiCyan": "#2aa198",
    "terminal.ansiGreen": "#859901",
    "terminal.ansiMagenta": "#d33682",
    "terminal.ansiRed": "#dc322f",
# Interactive helper: list the account's Azure container registries, prompt for
# a registry name, then docker-login to it using the registry's admin password.
# The password is piped to --password-stdin so it never appears in argv/ps.
# printf replaces echo '\n...' — plain bash echo prints the backslash-n
# literally; read -r keeps backslashes in the typed name intact; the
# $azregistry expansions are quoted so a stray space can't word-split.
alias acr-login="echo 'Fetching available Azure container registries...'; az acr list -otable --query='[*].{Name:name,Group:resourceGroup,Location:location}'; printf '\nEnter registry name: '; read -r azregistry; az acr credential show -n \"\$azregistry\" --query='[passwords[0]][0].value' -otsv | docker login -u \"\$azregistry\" --password-stdin \"\$azregistry.azurecr.io\""
{
  "swagger": "2.0",
  "info": {
    "version": "2016-09-12T23:19:28Z",
    "title": "ProxyAPIwithCreds"
  },
  "host": "my-domain.com",
  "basePath": "/test",
  "schemes": [
    "https"
{
  "$schema": "http://json.schemastore.org/proxies",
  "proxies": {
    "API proxy with Credentials": {
      "matchCondition": {
        "route": "/api/{*route}",
        "methods": [
          "GET"
        ]
      },
# Drain and restart one AKS node, then wait until it reports Ready again.
# Fixes vs. original one-liner: expansions are quoted (an empty jq result made
# the [ ] test a syntax error), and echo no longer prints a literal "\n".
AKSNODE=aks-nodepool1-32072832-0
RESOURCE_GROUP=MC_containergroupeast_managedkube_eastus

kubectl cordon "$AKSNODE" \
  && kubectl drain "$AKSNODE" --ignore-daemonsets=true \
  && echo "$AKSNODE restarting" \
  && az vm restart -g "$RESOURCE_GROUP" -n "$AKSNODE" \
  && sleep 10 \
  && kubectl uncordon "$AKSNODE" \
  && until [ "$(kubectl get node "$AKSNODE" --output=json \
        | jq -c '.status.conditions | map(select(.type == "Ready"))[0].status')" = '"True"' ]; do
       # Poll the node's Ready condition every 10 seconds.
       echo "sleeping 10 seconds" && sleep 10
     done \
  && echo "$AKSNODE successfully restarted"
This Hackathon is designed for beginners. No prior experience is necessary.
Azure provides powerful AI tools, wrapped up in APIs that you can use from your apps to add intelligence without being an AI expert. In this hands-on workshop you will build a Python app that takes advantage of one of these APIs to detect emotions in faces, alerting you if you have a sad face too often. This app will be in two parts, one part that runs on the desktop and takes photos, and another that runs in the cloud to analyse photos and store the emotions detected.
# Conversion of Microsoft Stream WebVTT file to SRT
# Takes advantage of specific metadata structure in Microsoft Stream VTT files
vtt = open('INPUT.vtt','r') | |
vtt_iter = iter(vtt) | |
with open('OUTPUT.srt', 'w') as srt: | |
counter = 1 | |
next(vtt_iter) | |
for line in vtt_iter: |
FFmpeg must be installed.
Instructions:
brew install ffmpeg
# Parameters for extracting a clip with ffmpeg.
# (Original lines carried trailing "| |" extraction residue, which bash
# parses as a pipe into an empty command — a syntax error. Removed.)
INFILE="video.mp4"          # Source video
OUTFILE="shortenedclip.mp4" # Destination clip
START="00:00:12.35"         # Start Time in hh:mm:ss.msec format
DURATION="00:01:05.4"       # Duration in hh:mm:ss.msec format
################## Alternative format ##################
# START="12.35"      # Start time in s.msec format     #
# DURATION="65.4"    # Duration time in s.msec format  #
########################################################