Skip to content

Instantly share code, notes, and snippets.

#!/bin/bash
# Validate CLI arguments: the script requires exactly a <date> and a <number>.
if [ $# -ne 2 ]; then
  # Fix: usage/diagnostic text belongs on stderr, not stdout, so callers
  # capturing output don't swallow the error message.
  echo "Usage: $0 <date> <number>" >&2
  exit 1
fi
date_input="$1"
number="$2"
#!/bin/bash
# Auto-shutdown helper: stops the instance after sustained low CPU utilization.
# Number of sequential checks when the instance had utilization below the threshold.
COUNTER=0
# If actual CPU utilization is below this threshold script will increase the counter.
THRESHOLD_PERCENT=2
# Interval between checks of the CPU utilizations.
SLEEP_INTERVAL_SECONDS=5
# How big COUNTER need to be for the script to shutdown the instance. For example,
# if we want an instance to be stopped after 20min of idle. Each utilization probe
# NOTE(review): the sentence above is cut off and the constant it describes is
# not visible in this fragment — confirm against the full script.
# Download and non-interactively install Anaconda3, then put it on PATH.
export ANACONDA_URL=https://repo.anaconda.com/archive/Anaconda3-5.2.0-Linux-x86_64.sh
export ANACONDA_INSTALLER=anaconda_installer.sh
# Abort if the download fails instead of chmod/executing a partial file.
wget "$ANACONDA_URL" -O "$ANACONDA_INSTALLER" || exit 1
chmod +x "$ANACONDA_INSTALLER"
./"$ANACONDA_INSTALLER" -b   # -b: batch (silent, accept defaults) install
# Fix: single quotes are essential here. The original double-quoted string
# expanded $PATH and $HOME at install time, baking this machine's current
# PATH into .bashrc; the literal text must be expanded each login instead.
echo 'export PATH=$PATH:$HOME/anaconda3/bin' >> "$HOME/.bashrc"
# Fix: sourcing .bashrc is typically a no-op in non-interactive shells
# (most .bashrc files return early), so extend PATH for this shell directly.
export PATH="$PATH:$HOME/anaconda3/bin"
# Training loop: per-sample gradient descent on the 1-D linear model.
# NOTE(review): this gist fragment is whitespace-mangled (loop bodies have lost
# their indentation) and the `with torch.no_grad():` block is truncated at the
# end — the parameter-update statements it should contain are not visible here.
epoch = 100
for _ in range(epoch):
for i in range(len(X)):
# One sample at a time: wrap each value as a tensor so autograd can track it.
x, y = torch.tensor(X[i]), torch.tensor(Y[i])
y_predict = model(x)
loss_tensor = loss(y_predict, y)
# Accumulates d(loss)/dw and d(loss)/db into w.grad / b.grad.
loss_tensor.backward()
# NOTE(review): `.data[0]` is the PyTorch 0.3-era idiom; on modern PyTorch it
# yields a 0-dim tensor rather than a float — `.item()` is the replacement.
loss_value = loss_tensor.data[0]
# Parameter updates must run untracked; the block body is cut off in this view.
with torch.no_grad():
%pip install torchviz==0.0.2
# one iteration
x = X[0]
y = Y[0]
y_predicted = model(x)
loss_tensor = loss(y_predicted, y)
loss_value = loss_tensor.data[0]
print(f"x: {x}, actual y: {y}, predicted y: {y}, loss: {loss_value}")
print(f"w: {w.data[0]}, b: {w.data[0]}")
@b0noI
b0noI / loss.py
Last active October 31, 2021 00:24
def loss(y_predict, y_actual):
    """Squared-error loss between a prediction and its target.

    Operates element-wise on tensors and keeps autograd history, so the
    result can be back-propagated through.
    """
    return (y_predict - y_actual) ** 2
import torch

# Fixed seed so the random initial parameters are reproducible across runs.
torch.manual_seed(2021)

# Trainable scalar parameters of the linear model y = w * x + b.
# NOTE: `w` must be drawn before `b` — the draw order fixes their seeded values.
w = torch.rand(1, requires_grad=True, dtype=torch.float64)
b = torch.rand(1, requires_grad=True, dtype=torch.float64)


def model(X):
    """Linear model: predict w * X + b (element-wise for tensor inputs)."""
    return w * X + b
import numpy as np


def f(x):
    """Ground-truth line the model should learn: y = 2x + 1."""
    return 2 * x + 1


# Seeded generator so the training data is reproducible.
rng = np.random.default_rng(2021)
X = rng.random(1000)               # 1000 samples, uniform in [0, 1)
Y = [f(sample) for sample in X]    # noise-free targets, kept as a Python list
# Create a Deep Learning VM that runs the Theia IDE container.
# Fill in the `...` placeholders before running.
export CONTAINER_URI="gcr.io/deeplearning-platform-release/experimental.theia.1-7"
export INSTANCE_NAME=...
export PROJECT_NAME=...
# Image project/family select the experimental Theia container DLVM image.
export IMAGE_PROJECT="deeplearning-platform-release"
export IMAGE_FAMILY="theia-container-experimental"
export MACHINE_TYPE=... #"n1-standard-4"
export ZONE=... #"us-central1-a"
# NOTE(review): this command is truncated in this fragment — the trailing
# backslash on the last visible line continues onto lines not shown here
# (presumably the --image-family/--image-project/--machine-type/container flags).
gcloud compute instances create "${INSTANCE_NAME}" \
--project="${PROJECT_NAME}" \
--zone="${ZONE}" \
# see list of containers: https://github.com/JetBrains/projector-docker
# Base image: JetBrains Projector serving IntelliJ IDEA Community in a browser.
FROM registry.jetbrains.team/p/prj/containers/projector-idea-c:latest
# Tell the Projector server which port to listen on inside the container.
ENV ORG_JETBRAINS_PROJECTOR_SERVER_PORT="8080"
# Document the listening port; publish it with `docker run -p 8080:8080`.
EXPOSE 8080