@ZeppLu
Last active September 21, 2020 07:08
Tested on a Jetson Nano; these scripts should work on any Jetson device with JetPack installed.

Modified from the official Dockerfiles.

Machine Learning

Change or unset the http_proxy/https_proxy/ALL_PROXY exports at the top of each script to match your network (a one-liner to strip them entirely is shown after the commands below), then run:

cd /
sudo -H ./pytorch.sh
sudo -H ./tensorflow.sh
sudo -H ./ml.sh

Robot Operating System

./ros_melodic.sh
./ros_catkin.sh
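If no proxy is needed at all, the proxy export lines can be stripped from every script in one pass, for example (a sketch assuming GNU sed and that the scripts are in the current directory):

sed -i '/^export \(http_proxy\|https_proxy\|ALL_PROXY\)=/d' *.sh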

ml.sh

#!/bin/bash
export http_proxy="http://192.168.1.104:10809"
export https_proxy="http://192.168.1.104:10809"
export ALL_PROXY="socks5://192.168.1.104:10808"
#
# setup environment
#
export DEBIAN_FRONTEND=noninteractive
export CUDA_HOME="/usr/local/cuda"
export PATH="/usr/local/cuda/bin:${PATH}"
export LD_LIBRARY_PATH="/usr/local/cuda/lib64:${LD_LIBRARY_PATH}"
export LLVM_CONFIG="/usr/bin/llvm-config-9"
export MAKEFLAGS=-j6
#
# apt packages
#
apt-get update && \
apt-get install -y --no-install-recommends \
python3-pip \
python3-dev \
python3-matplotlib \
build-essential \
gfortran \
git \
cmake \
libopenblas-dev \
liblapack-dev \
libblas-dev \
libhdf5-serial-dev \
hdf5-tools \
libhdf5-dev \
zlib1g-dev \
zip \
libjpeg8-dev \
libopenmpi2 \
openmpi-bin \
openmpi-common \
nodejs \
npm \
protobuf-compiler \
libprotoc-dev \
llvm-9 \
llvm-9-dev \
&& rm -rf /var/lib/apt/lists/*
#
# python packages from TF/PyTorch containers
#
#
# python pip packages
#
pip3 install pybind11 --ignore-installed
pip3 install onnx --verbose
pip3 install scipy --verbose
pip3 install scikit-learn --verbose
pip3 install pandas --verbose
pip3 install pycuda --verbose
pip3 install numba --verbose
#
# restore missing cuDNN headers
#
#RUN ln -s /usr/include/aarch64-linux-gnu/cudnn_v8.h /usr/include/cudnn.h && \
# ln -s /usr/include/aarch64-linux-gnu/cudnn_version_v8.h /usr/include/cudnn_version.h && \
# ln -s /usr/include/aarch64-linux-gnu/cudnn_backend_v8.h /usr/include/cudnn_backend.h && \
# ln -s /usr/include/aarch64-linux-gnu/cudnn_adv_infer_v8.h /usr/include/cudnn_adv_infer.h && \
# ln -s /usr/include/aarch64-linux-gnu/cudnn_adv_train_v8.h /usr/include/cudnn_adv_train.h && \
# ln -s /usr/include/aarch64-linux-gnu/cudnn_cnn_infer_v8.h /usr/include/cudnn_cnn_infer.h && \
# ln -s /usr/include/aarch64-linux-gnu/cudnn_cnn_train_v8.h /usr/include/cudnn_cnn_train.h && \
# ln -s /usr/include/aarch64-linux-gnu/cudnn_ops_infer_v8.h /usr/include/cudnn_ops_infer.h && \
# ln -s /usr/include/aarch64-linux-gnu/cudnn_ops_train_v8.h /usr/include/cudnn_ops_train.h && \
# ls -ll /usr/include/cudnn*
#
# CuPy
#
export CUPY_NVCC_GENERATE_CODE="arch=compute_53,code=sm_53;arch=compute_62,code=sm_62;arch=compute_72,code=sm_72"
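# the arch list above covers Nano/TX1 (sm_53), TX2 (sm_62) and Xavier (sm_72)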
export CUB_PATH="/opt/cub"
#ARG CFLAGS="-I/opt/cub"
#ARG LDFLAGS="-L/usr/lib/aarch64-linux-gnu"
git clone https://github.com/NVlabs/cub "$CUB_PATH" && \
git clone -b v8.0.0b4 https://github.com/cupy/cupy cupy && \
cd cupy && \
pip3 install fastrlock && \
python3 setup.py install --verbose && \
cd ../ && \
rm -rf cupy
#RUN pip3 install cupy --verbose
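# optional sanity check: run a small CuPy kernel on the GPU to confirm the build works
python3 -c "import cupy; print((cupy.arange(5) ** 2).sum())"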
#
# JupyterLab
#
pip3 install jupyter jupyterlab --verbose
#RUN jupyter labextension install @jupyter-widgets/jupyterlab-manager@2
jupyter lab --generate-config
python3 -c "from notebook.auth.security import set_password; set_password('nvidia', '/root/.jupyter/jupyter_notebook_config.json')"
/bin/bash -c "jupyter lab --ip 0.0.0.0 --port 8888 --allow-root &> /var/log/jupyter.log" & \
echo "allow 10 sec for JupyterLab to start @ http://localhost:8888 (password nvidia)" && \
echo "JupterLab logging location: /var/log/jupyter.log (inside the container)" && \
/bin/bash

pytorch.sh

#!/bin/bash
export http_proxy="http://192.168.10.100:10809"
export https_proxy="http://192.168.10.100:10809"
export ALL_PROXY="socks5://192.168.10.100:10808"
#
# setup environment
#
export DEBIAN_FRONTEND=noninteractive
#
# install prerequisites (many of these are for numpy)
#
apt-get update && \
apt-get install -y --no-install-recommends \
python3-pip \
python3-dev \
libopenblas-dev \
libopenmpi2 \
openmpi-bin \
openmpi-common \
gfortran \
&& rm -rf /var/lib/apt/lists/*
pip3 install setuptools Cython wheel
pip3 install numpy --verbose
#
# PyTorch (for JetPack 4.4 production release)
#
# PyTorch v1.2.0 https://nvidia.box.com/shared/static/lufbgr3xu2uha40cs9ryq1zn4kxsnogl.whl (torch-1.2.0-cp36-cp36m-linux_aarch64.whl)
# PyTorch v1.3.0 https://nvidia.box.com/shared/static/017sci9z4a0xhtwrb4ps52frdfti9iw0.whl (torch-1.3.0-cp36-cp36m-linux_aarch64.whl)
# PyTorch v1.4.0 https://nvidia.box.com/shared/static/c3d7vm4gcs9m728j6o5vjay2jdedqb55.whl (torch-1.4.0-cp36-cp36m-linux_aarch64.whl)
# PyTorch v1.5.0 https://nvidia.box.com/shared/static/3ibazbiwtkl181n95n9em3wtrca7tdzp.whl (torch-1.5.0-cp36-cp36m-linux_aarch64.whl)
#
export PYTORCH_URL="https://nvidia.box.com/shared/static/9eptse6jyly1ggt9axbja2yrmj6pbarc.whl"
export PYTORCH_WHL="torch-1.6.0-cp36-cp36m-linux_aarch64.whl"
wget --quiet --show-progress --progress=bar:force:noscroll --no-check-certificate ${PYTORCH_URL} -O ${PYTORCH_WHL} && \
pip3 install ${PYTORCH_WHL} --verbose && \
echo rm ${PYTORCH_WHL}
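# note: the "echo rm" above only prints the rm command, so the downloaded wheel stays on disk
# optional sanity check: confirm that torch imports and can see the GPU
python3 -c "import torch; print(torch.__version__, 'CUDA available:', torch.cuda.is_available())"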
#
# torchvision
# - PyTorch v1.0 : torchvision v0.2.2
# - PyTorch v1.1 : torchvision v0.3.0
# - PyTorch v1.2 : torchvision v0.4.0
# - PyTorch v1.3 : torchvision v0.4.2
# - PyTorch v1.4 : torchvision v0.5.0
# - PyTorch v1.5 : torchvision v0.6.0
# - PyTorch v1.6 : torchvision v0.7.0
#
export TORCHVISION_VERSION="v0.7.0"
export PILLOW_VERSION="pillow<7"
export TORCH_CUDA_ARCH_LIST="5.3;6.2;7.2"
echo "torchvision version = $TORCHVISION_VERSION" && echo "pillow version = $PILLOW_VERSION" && echo "TORCH_CUDA_ARCH_LIST = $TORCH_CUDA_ARCH_LIST"
apt-get update && \
apt-get install -y --no-install-recommends \
git \
build-essential \
libjpeg-dev \
zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
git clone -b ${TORCHVISION_VERSION} https://github.com/pytorch/vision torchvision && \
cd torchvision && \
export BUILD_VERSION="${TORCHVISION_VERSION#v*}" && \
python3 setup.py install && \
cd ../ && \
rm -rf torchvision && \
pip3 install "${PILLOW_VERSION}"
#
# torchaudio
#
export TORCHAUDIO_VERSION="v0.6.0"
apt-get update && \
apt-get install -y --no-install-recommends \
sox \
libsox-dev \
libsox-fmt-all \
&& rm -rf /var/lib/apt/lists/*
git clone -b ${TORCHAUDIO_VERSION} https://github.com/pytorch/audio torchaudio && \
cd torchaudio && \
python3 setup.py install && \
cd ../ && \
rm -rf torchaudio
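# optional sanity check: both extensions should import and report their versions
python3 -c "import torchvision, torchaudio; print(torchvision.__version__, torchaudio.__version__)"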
#
# PyCUDA
#
export PATH="/usr/local/cuda/bin:${PATH}"
export LD_LIBRARY_PATH="/usr/local/cuda/lib64:${LD_LIBRARY_PATH}"
echo "$PATH" && echo "$LD_LIBRARY_PATH"
pip3 install pycuda --verbose
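# optional sanity check: confirm PyCUDA can initialize the driver and see the device
python3 -c "import pycuda.driver as cuda; cuda.init(); print(cuda.Device(0).name())"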

ros_catkin.sh

#!/bin/bash
if [ $(id -u) = 0 ]; then
echo "This script should not be run as root!"
exit 1
fi
export ROS_DISTRO="melodic"
# http://wiki.ros.org/catkin
#sudo apt-get install ros-melodic-catkin
# http://wiki.ros.org/catkin/Tutorials/create_a_workspace
source /opt/ros/$ROS_DISTRO/setup.bash
mkdir -p ~/catkin_ws/src
cd ~/catkin_ws/
catkin_make
echo "source $HOME/catkin_ws/devel/setup.bash" >> $HOME/.bashrc
source $HOME/catkin_ws/devel/setup.bash
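# example (commented out): create a first package in the new workspace and rebuild;
# 'hello_ros' and its dependency list are placeholders
#cd ~/catkin_ws/src
#catkin_create_pkg hello_ros rospy roscpp std_msgs
#cd ~/catkin_ws && catkin_make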

ros_melodic.sh

#!/bin/bash
#
# this script roughly follows the 'Ubuntu install of ROS Melodic' from:
# http://wiki.ros.org/melodic/Installation/Ubuntu
#
export http_proxy="http://192.168.1.104:10809"
export https_proxy="http://192.168.1.104:10809"
export ROS_PKG=ros_base
export ROS_DISTRO=melodic
export ROS_ROOT=/opt/ros/${ROS_DISTRO}
export DEBIAN_FRONTEND=noninteractive
if [ $(id -u) = 0 ]; then
echo "This script should not be run as root!"
exit 1
fi
# add the ROS deb repo to the apt sources list
sudo apt-get update && \
sudo apt-get install -y --no-install-recommends \
git \
cmake \
build-essential \
curl \
wget \
gnupg2 \
lsb-release \
&& sudo rm -rf /var/lib/apt/lists/*
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ros/ubuntu/ $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/ros-latest.list
sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
# install ROS packages
sudo apt-get update && \
sudo apt-get install -y --no-install-recommends \
ros-melodic-ros-base \
ros-melodic-image-transport \
ros-melodic-vision-msgs \
python-rosdep \
python-rosinstall \
python-rosinstall-generator \
python-wstool \
&& sudo rm -rf /var/lib/apt/lists/*
# init/update rosdep
sudo apt-get update && \
cd ${ROS_ROOT} && \
sudo rosdep init && \
rosdep update && \
sudo rm -rf /var/lib/apt/lists/*
# setup entrypoint
#COPY ./packages/ros_entrypoint.sh /ros_entrypoint.sh
echo "source ${ROS_ROOT}/setup.bash" >> "$HOME"/.bashrc
#ENTRYPOINT ["/ros_entrypoint.sh"]
#CMD ["bash"]
source ${ROS_ROOT}/setup.bash
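# optional sanity check: the distro should now resolve to "melodic"
rosversion -d
# a fuller check (commented out): briefly start the master and list topics
#roscore & ROSCORE_PID=$!
#sleep 5 && rostopic list
#kill $ROSCORE_PID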

tensorflow.sh

#!/bin/bash
export http_proxy="http://192.168.1.104:10809"
export https_proxy="http://192.168.1.104:10809"
export ALL_PROXY="socks5://192.168.1.104:10808"
#
# setup environment
#
export DEBIAN_FRONTEND=noninteractive
export HDF5_DIR="/usr/lib/aarch64-linux-gnu/hdf5/serial/"
export MAKEFLAGS="-j6"
#
# install prerequisites - https://docs.nvidia.com/deeplearning/frameworks/install-tf-jetson-platform/index.html#prereqs
#
apt-get update && \
apt-get install -y --no-install-recommends \
python3-pip \
python3-dev \
gfortran \
build-essential \
liblapack-dev \
libblas-dev \
libhdf5-serial-dev \
hdf5-tools \
libhdf5-dev \
zlib1g-dev \
zip \
libjpeg8-dev \
&& rm -rf /var/lib/apt/lists/*
pip3 install setuptools Cython wheel
pip3 install numpy --verbose
pip3 install h5py==2.10.0 --verbose
pip3 install future==0.17.1 mock==3.0.5 keras_preprocessing==1.0.5 keras_applications==1.0.8 gast==0.2.2 futures protobuf pybind11 --verbose
#
# TensorFlow (for JetPack 4.4 DP)
#
# TensorFlow 1.15 https://nvidia.box.com/shared/static/rummpy6q1km1wivomalpkwt2jy28mndf.whl (tensorflow-1.15.2+nv-cp36-cp36m-linux_aarch64.whl)
#
export TENSORFLOW_URL="https://nvidia.box.com/shared/static/rummpy6q1km1wivomalpkwt2jy28mndf.whl"
export TENSORFLOW_WHL="tensorflow-1.15.2+nv-cp36-cp36m-linux_aarch64.whl"
wget --quiet --show-progress --progress=bar:force:noscroll --no-check-certificate ${TENSORFLOW_URL} -O ${TENSORFLOW_WHL} && \
pip3 install ${TENSORFLOW_WHL} --verbose && \
rm ${TENSORFLOW_WHL}
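# optional sanity check: confirm TensorFlow imports and detects the GPU
# (the first import can take a while on Jetson)
python3 -c "import tensorflow as tf; print(tf.__version__); print('GPU available:', tf.test.is_gpu_available())"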
#
# PyCUDA
#
export PATH="/usr/local/cuda/bin:${PATH}"
export LD_LIBRARY_PATH="/usr/local/cuda/lib64:${LD_LIBRARY_PATH}"
echo "$PATH" && echo "$LD_LIBRARY_PATH"
pip3 install pycuda --verbose