# Install AVD files
# Download the Android 29 (x86, AOSP "default") system image; `yes` auto-answers prompts.
yes | $ANDROID_HOME/tools/bin/sdkmanager --install 'system-images;android-29;default;x86'
# Accept all outstanding SDK licenses non-interactively.
yes | $ANDROID_HOME/tools/bin/sdkmanager --licenses
# Create emulator
# "no" declines the custom hardware profile prompt; --force overwrites an existing AVD
# with the same name so the script is re-runnable.
# NOTE(review): $ANDROID_HOME/tools/bin is the legacy SDK layout; newer SDK installs
# ship sdkmanager/avdmanager under cmdline-tools/latest/bin — confirm against the SDK in use.
echo "no" | $ANDROID_HOME/tools/bin/avdmanager create avd -n Pixel_API_29_AOSP -d pixel --package 'system-images;android-29;default;x86' --force
# List AVDs to verify the device was created.
$ANDROID_HOME/emulator/emulator -list-avds
// Load TensorFlow.js with the native Node backend, plus Jimp for image decoding.
const tf = require('@tensorflow/tfjs-node');
const Jimp = require('jimp');

// Directory path for model files (model.json, metadata.json, weights.bin)
// NOTE: It can be obtained from [Export Model] -> [Tensorflow.js] -> [Download my model]
// on https://teachablemachine.withgoogle.com/train/image
const MODEL_DIR_PATH = `${__dirname}`;

// Path for image file to predict class
const IMAGE_FILE_PATH = `${__dirname}/example.jpg`;
# Having a requirements.txt file as follows
torch==1.5.0
numpy==1.18.1
# Add channels. The channel added last has the highest priority,
# so after these three commands the priority order is: anaconda > conda-forge > pytorch.
conda config --add channels pytorch
conda config --add channels conda-forge
conda config --add channels anaconda
# Install the BERT / TF Hub stack with pip as a fallback
pip install bert-for-tf2
pip install bert-tokenizer
pip install tensorflow-hub
pip install bert-tensorflow
pip install sentencepiece
# TF Hub provides the pretrained BERT module; `bert` supplies tokenization helpers.
import tensorflow_hub as hub
import tensorflow as tf
import bert
""" Kedro Torch Model IO
Models need to be imported and added to the dictionary
as shown with the ExampleModel
Example of catalog entry:
modo:
  type: kedro_example.io.torch_model.TorchLocalModel
  filepath: modo.pt
#!/usr/bin/env bash
### Bash Environment Setup
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
# https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html
# set -o xtrace    # uncomment to echo every command before it runs (debugging)
set -o errexit     # exit immediately when any command fails
set -o errtrace    # ERR traps are inherited by functions, subshells, and command substitutions
set -o nounset     # expanding an unset variable is an error
set -o pipefail    # a pipeline fails if any command in it fails, not just the last
''' ExportModel.py - TF-Serving
# Basically we are wrapping your pretrained model
# in a tensorflow serving compatible format.
# This expects base64 encoded png images and uses
# them as input to your model. Then we convert
# your models output into a png encoded image and
# it gets returned by tensorflow serving base64 encoded.
'''
import tensorflow as tf
For an emulator that mimics a Pixel 5 device with Google APIs and ARM architecture (for an M1/M2 MacBook):
-
List All System Images Available for Download:
sdkmanager --list | grep system-images
-
Download Image:
sdkmanager --install "system-images;android-30;google_atd;arm64-v8a"
# Create a Singularity image from a Docker image that is in the Docker Hub,
# where /tmp/ is the folder where the image will be created and ubuntu:14.04
# is the Docker image used to convert to the Singularity image.
# Mounting the Docker socket lets the container drive the host's Docker daemon;
# --privileged is required by docker2singularity to build the image file.
docker run \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /tmp/:/output \
  --privileged -t --rm \
  singularityware/docker2singularity \
  ubuntu:14.04
Ramp up your Kubernetes development, CI-tooling or testing workflow by running multiple Kubernetes clusters on Ubuntu Linux with KVM and minikube.
In this tutorial we will combine the popular minikube
tool with Linux's Kernel-based Virtual Machine (KVM) support. It is a great way to re-purpose an old machine that you found on eBay or have gathering dust under your desk. An Intel NUC would also make a great host for this tutorial if you want to buy some new hardware. Another popular angle is to use a bare metal host in the cloud and I've provided some details on that below.
We'll set up all the tooling so that you can build one or many single-node Kubernetes clusters and then deploy applications to them such as OpenFaaS using familiar tooling like helm. I'll then show you how to access the Kubernetes clusters from a remote machine such as your laptop.
- This tutorial uses Ubuntu 16.04 as a base installation, but other distributions are supported by KVM. You'll need to find out how to install