https://www.kaggle.com/datasets/alexpunnen/wikipedia-history-of-save-failures
docker run --rm -it --net=host -v /home/xxx:/home alexcpn/fb_prophet_python:1 bash -c "cd /home/xx/python && python outlier_full.py"
https://www.kaggle.com/datasets/alexpunnen/wikipedia-history-of-save-failures
docker run --rm -it --net=host -v /home/xxx:/home alexcpn/fb_prophet_python:1 bash -c "cd /home/xx/python && python outlier_full.py"
Note: The region of the Bucket and of the Data should be the same
BigQuery ML Model - CREATE or REPLACE MODEL
CREATE OR REPLACE MODEL `computervision-159307.test.logistic_model` OPTIONS(model_type='LOGISTIC_REG',
input_label_cols=['output']) AS
SELECT
age,
cp,
def softmax(x):
    """Compute softmax values for each set of scores in x.

    Subtracting the max before exponentiating is a standard
    numerical-stability trick: it prevents overflow in np.exp while
    leaving the result unchanged, since softmax is shift-invariant.
    Why the max — see Eli's post
    https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/

    Args:
        x: array-like of scores.

    Returns:
        numpy array of the same shape as x, entries in (0, 1) summing to 1.
    """
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()
def derv_softmax(s): # where x is the input | |
""" |
#!/bin/bash | |
KUBELET_HOME=/var/lib | |
for podid in 05156637-1f26-4437-a0ea-3b60062c454a; | |
do | |
if [ ! -d ${KUBELET_HOME}/kubelet/pods/$podid ]; then | |
break | |
fi |
def conv2d(self, image, filter): | |
imagesize = image.shape | |
filter_size = filter.shape | |
print(imagesize) | |
i = imagesize[0] | |
j = imagesize[1] | |
k = imagesize[2] | |
fl = filter_size[0] # filter length of row | |
print("Image=", i, j, k, "Filter=", fl) | |
di = -1 |
We are taking from version v1.5.10. The latest v1.6.1 has a bug for external Ceph (git clone --single-branch --branch v1.5.10 https://github.com/rook/rook.git)
kubectl create -f rookv1.5.10/operator/crds.yaml
Some context here - https://developers.redhat.com/blog/2019/04/24/how-to-run-systemd-in-a-container
With Docker you need to mount the host volume -v /sys/fs/cgroup:/sys/fs/cgroup:ro
However this is not needed for podman
If the Kubernetes cluster is still using Docker, this will not work in a pod without these mounts; but if using Containerd directly (kubelet --> Containerd --> runc) I am not sure
# https://www.kubeflow.org/docs/components/pipelines/sdk/pipelines-metrics/ - v1 flow | |
def model_evaluvate_v1(modelpath:InputPath("TF_Model"), | |
test_dataset_path:InputPath("TF_DataSet")) -> NamedTuple("Outputs", [ ("mlpipeline_metrics", "Metrics"),]): | |
from tensorflow import keras | |
import tensorflow as tf | |
from sklearn.metrics import confusion_matrix | |
import json | |
# Load data from the file path /KF components | |
model = keras.models.load_model(modelpath) |
# Create a 1Gi ReadWriteOnce PersistentVolumeClaim as a pipeline resource op.
# NOTE(review): requires `kfp.dsl` in scope as `dsl`, and `kubeflow_task`
# plus the pipeline parameter `a` defined elsewhere in this file.
vop = dsl.VolumeOp(
    name="create-pvc",
    resource_name="my-pvc",
    modes=dsl.VOLUME_MODE_RWO,
    size='1Gi')

# Pass the pipeline parameter `a` to the `kubeflow_task` component factory
# and mount the PVC created above at /mnt inside the task's container.
first_task = kubeflow_task(a).add_pvolumes({"/mnt": vop.volume})
---- |
# Wrap each pipeline-step function as a reusable KFP component, pinning a
# TensorFlow base image and the pip packages the step needs.
# NOTE(review): "sklearn" is the deprecated pip alias for "scikit-learn";
# confirm it still installs on the pinned base images before changing it.
read_data = create_component_from_func(
    readdata,
    base_image="tensorflow/tensorflow:2.6.0",
    packages_to_install=["pandas==0.24", "sklearn", "numpy", "pyarrow"],
    output_component_file='read_data_component.yaml')
# OR load the previously exported component definition straight from a URL:
read_data = load_component_from_url("https://gist.github.com/alexcpn/f82d63976656ffda0a508f9bafa28440/raw/628b828e13825e58f6ddfdf0bf0471e2ff93d527/read_data_component.yaml")
process_data = create_component_from_func(
    process_data,
    base_image="tensorflow/tensorflow:2.6.0",
    packages_to_install=["pandas==0.24", "sklearn", "numpy", "pyarrow"])
create_nn_model = create_component_from_func(
    create_nn_model, base_image="tensorflow/tensorflow:2.5.1")
train_model = create_component_from_func(
    train_model, base_image="tensorflow/tensorflow:2.5.1")