@kounoike
Created January 2, 2021 18:49
UniverseNet+manga109
Dockerfile
ARG PYTORCH="1.5"
ARG CUDA="10.1"
ARG CUDNN="7"
FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
RUN apt-get update && apt-get install -y --no-install-recommends \
    git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
    wget libgl1-mesa-glx \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Install MMCV
RUN pip install mmcv-full==1.1.6+torch1.5.0+cu101 -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html
# Install UniverseNet (an MMDetection fork)
RUN conda clean --all
RUN git clone https://github.com/shinya7y/UniverseNet.git /universenet
WORKDIR /universenet/configs/manga109
RUN wget -q https://github.com/shinya7y/UniverseNet/releases/download/20.12/universenet50_2008_fp16_4x4_mstrain_480_960_1x_manga109s_20201220_epoch_12-6af914a4.pth
WORKDIR /universenet
ENV FORCE_CUDA="1"
RUN pip install -r requirements/build.txt
RUN pip install --no-cache-dir -e .
COPY run_img.py .
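
A minimal sanity check, not part of the gist, that can be run inside the built container before the scripts below: it prints the PyTorch and CUDA versions and confirms that the CUDA ops in mmcv-full compiled against the expected toolkit.

# Sanity check (assumption: executed inside the container built from the Dockerfile above)
import torch
from mmcv.ops import get_compiler_version, get_compiling_cuda_version

print(torch.__version__, torch.cuda.is_available())  # expect 1.5.x and True on a GPU host
print(get_compiling_cuda_version())                   # expect 10.1, matching the base image
print(get_compiler_version())
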
run_img.py
from mmdet.apis import init_detector, inference_detector
import mmcv
config_file = '/universenet/configs/manga109/universenet50_2008_fp16_4x4_mstrain_480_960_1x_manga109s.py'
checkpoint_file = '/universenet/configs/manga109/universenet50_2008_fp16_4x4_mstrain_480_960_1x_manga109s_20201220_epoch_12-6af914a4.pth'
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
# test a single image and show the results
img = 'input.png' # or img = mmcv.imread(img), which will only load it once
result = inference_detector(model, img)
# visualize the results in a new window
model.show_result(img, result)
# or save the visualization results to image files
model.show_result(img, result, out_file='result.png')
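
If the raw detections are needed rather than a rendered image, the result can be read directly. A minimal sketch, assuming the usual bbox-only MMDetection output of one (N, 5) array of [x1, y1, x2, y2, score] per class; the 0.3 score threshold is an arbitrary choice, and class names come from model.CLASSES.

# Hedged sketch (not part of the original gist): print detections above a score threshold
for class_id, bboxes in enumerate(result):
    for x1, y1, x2, y2, score in bboxes:
        if score >= 0.3:
            print(f"{model.CLASSES[class_id]}: score={score:.2f} "
                  f"box=({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f})")

The second script applies the same detector frame by frame to a video and writes one visualization image per frame.
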
from mmdet.apis import init_detector, inference_detector
import mmcv
import logging
logging.basicConfig(level=logging.INFO)
config_file = '/universenet/configs/manga109/universenet50_2008_fp16_4x4_mstrain_480_960_1x_manga109s.py'
checkpoint_file = '/universenet/configs/manga109/universenet50_2008_fp16_4x4_mstrain_480_960_1x_manga109s_20201220_epoch_12-6af914a4.pth'
logging.info("loading model")
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
logging.info("load done.")
# test a video and show the results
logging.info("reading mp4")
video = mmcv.VideoReader('input.mp4')
logging.info("read done.")
for idx, frame in enumerate(video):
    if idx % 100 == 0:
        logging.info(f"frame {idx} / {len(video)}")
    result = inference_detector(model, frame)
    model.show_result(frame, result, out_file=f'result/{idx:06d}.png')
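
To stitch the per-frame images back into a video, mmcv's frames2video helper can be appended to the script above. A minimal sketch; the fps and codec are assumptions (video.fps from the reader could be reused to match the input).

# Hedged sketch: assemble result/000000.png, result/000001.png, ... into a video
mmcv.frames2video('result', 'result.mp4', fps=30, fourcc='mp4v',
                  filename_tmpl='{:06d}.png')
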