Daisuke Majima (john-rocky)

// Usage:
// let mlMultiArray:MLMultiArray = uiImage.mlMultiArray()
//
// or if you need preprocess ...
// let preProcessedMlMultiArray:MLMultiArray = uiImage.mlMultiArray(scale: 127.5, rBias: -1, gBias: -1, bBias: -1)
//
// or if you have gray scale image ...
// let grayScaleMlMultiArray:MLMultiArray = uiImage.mlMultiArrayGrayScale()
extension UIImage {
python3 demo_eval.py --image_dir demo_images --image_name my_image.jpg --input_color_space sRGB
pip install torch==0.4.1.post2 torchvision==0.2.2 -f https://download.pytorch.org/whl/torch_stable.html
pip install -r requirements.txt
cd trilinear_c
sh make.sh
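
For intuition, the trilinear 3D-LUT lookup that the compiled extension accelerates can be sketched in plain NumPy. This is only an illustrative sketch: the function name apply_3d_lut and the lut[r, g, b] index order are assumptions, not the repository's API.

import numpy as np

def apply_3d_lut(img, lut):
    # img: float32 RGB image in [0, 1], shape (H, W, 3)
    # lut: lattice of output colors in [0, 1], shape (N, N, N, 3)
    n = lut.shape[0]
    scaled = img * (n - 1)                                  # map [0, 1] onto the lattice
    idx = np.clip(np.floor(scaled).astype(int), 0, n - 2)   # lower lattice corner
    frac = scaled - idx                                      # fractional offset per channel
    r, g, b = idx[..., 0], idx[..., 1], idx[..., 2]
    fr, fg, fb = frac[..., 0:1], frac[..., 1:2], frac[..., 2:3]
    out = np.zeros_like(img)
    # blend the 8 surrounding lattice points (trilinear interpolation)
    for dr in (0, 1):
        for dg in (0, 1):
            for db in (0, 1):
                w = ((fr if dr else 1 - fr) *
                     (fg if dg else 1 - fg) *
                     (fb if db else 1 - fb))
                out += w * lut[r + dr, g + dg, b + db]
    return out
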
python models/export.py --weights yolov7.pt
python train.py --workers 8 --device 0 --batch-size 32 --data data/coco.yaml --img 640 640 --cfg cfg/training/yolov7.yaml --weights '' --name yolov7 --hyp data/hyp.scratch.p5.yaml
python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source inference/images/horses.jpg
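
If the export step above produced an ONNX file, a quick sanity check with onnxruntime might look like the sketch below. The file name yolov7.onnx and the 1x3x640x640 input shape are assumptions; adjust them to whatever export.py actually emitted.

import cv2
import numpy as np
import onnxruntime as ort

# load the exported model and look up its input name
session = ort.InferenceSession("yolov7.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name

# minimal preprocessing: resize, BGR -> RGB, HWC -> NCHW, scale to [0, 1]
img = cv2.imread("inference/images/horses.jpg")
img = cv2.resize(img, (640, 640))                  # plain resize; letterboxing omitted
blob = img[:, :, ::-1].transpose(2, 0, 1)[None]
blob = np.ascontiguousarray(blob, dtype=np.float32) / 255.0

outputs = session.run(None, {input_name: blob})
print([o.shape for o in outputs])                  # raw predictions; confidence filtering and NMS still needed
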
git clone https://github.com/chaofengc/FeMaSR.git
cd FeMaSR
pip install -r requirements.txt
python setup.py develop
python inference_femasr.py -s 4 -i ./testset -o results_x4/
git clone https://github.com/vis-opt-group/SCI.git
cd SCI
python test.py --data_path data/mydata --model './weights/easy.pt'
from __future__ import print_function
import numpy as np
import argparse
import cv2

def adjust_gamma(image, gamma=1.0):
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
        for i in np.arange(0, 256)]).astype("uint8")
    # apply gamma correction to the image using the lookup table
    return cv2.LUT(image, table)

img_src = cv2.imread('image.jpg', 1)
img = adjust_gamma(img_src, gamma=1.0)
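
As a small usage example, sweeping a few gamma values and writing the results out makes the effect easy to compare. With this lookup table, gamma > 1 brightens the image because the exponent 1/gamma drops below 1 (at gamma=2.0 a pixel value of 64 maps to 255 * (64/255) ** 0.5, roughly 127), while gamma < 1 darkens it. The output file names below are placeholders.

# write out a few gamma-corrected variants of the same source image for comparison
for g in (0.5, 1.0, 1.5, 2.0):
    corrected = adjust_gamma(img_src, gamma=g)
    cv2.imwrite("image_gamma_{}.jpg".format(g), corrected)
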