REFERENCES FOR LEARNING & USING APPLESCRIPT (last modified: 2018-06-19 18:47)
AppleScript is a rather peculiar scripting language to learn.
# Evaluation-script setup: import the WIDER FACE voting helper, silence
# warnings, and force matplotlib into a headless backend.
from widerface_val import bbox_vote
from warnings import filterwarnings

filterwarnings("ignore")  # suppress all warnings globally for this run
# 'agg' renders to buffers/files only — needed when no display is attached.
# NOTE(review): assumes `plt` (matplotlib.pyplot) is imported earlier in the file.
plt.switch_backend('agg')
{
"NotebookApp": {
"open_browser": false,
"port": 8887,
"ip": "0.0.0.0",
"allow_origin": "*",
"notebook_dir": "c:\\users\\admin\\",
"certfile": "c:\\users\\admin\\.jupyter\\mycert.pem",
"keyfile": "c:\\users\\admin\\.jupyter\\mykey.key",
"allow_remote_access": true,
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib/modules \
-D PYTHON3_LIBRARY=`python -c 'import subprocess ; import sys ; s = subprocess.check_output("python-config --configdir", shell=True).decode("utf-8").strip() ; (M, m) = sys.version_info[:2] ; print("{}/libpython{}.{}.dylib".format(s, M, m))'` \
-D PYTHON3_INCLUDE_DIR=`python -c 'import distutils.sysconfig as s; print(s.get_python_inc())'` \
-D PYTHON3_EXECUTABLE=$VIRTUAL_ENV/bin/python \
-D BUILD_opencv_python2=OFF \
-D BUILD_opencv_python3=ON \
-D INSTALL_PYTHON_EXAMPLES=ON \
-D INSTALL_C_EXAMPLES=OFF \
{
"NotebookApp": {
"open_browser": false,
"port": 8888,
"ip": "0.0.0.0",
"allow_origin": "*",
"notebook_dir": "/mnt/c/users/admin/",
"certfile": "/home/user/.jupyter/mycert.pem",
"keyfile": "/home/user/.jupyter/mykey.key",
"allow_remote_access": true,
def _str2bool(value):
    """Parse a command-line boolean: true/false, yes/no, t/f, y/n, 1/0 (case-insensitive)."""
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


# Command-line interface for the DSFD (Dual Shot Face Detector) evaluation script.
parser = argparse.ArgumentParser(description='DSFD:Dual Shot Face Detector')
parser.add_argument('--trained_model', default='weights/WIDERFace_DSFD_RES152.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval_tools/', type=str,
                    help='Dir to save results')
parser.add_argument('--visual_threshold', default=0.1, type=float,
                    help='Final confidence threshold')
# BUG FIX: plain `type=bool` treats ANY non-empty string — including "False" —
# as True; _str2bool parses the text correctly while keeping the same CLI shape.
parser.add_argument('--cuda', default=False, type=_str2bool,
                    help='Use cuda to train model')
# Load the fastai MNIST_SAMPLE dataset (only the digits 3 and 7) and convert
# the training images into normalized float tensors in [0, 1].
from fastai.vision.all import *
from fastbook import *

file_directory = untar_data(URLs.MNIST_SAMPLE)

# Stack every '3' training image into one float tensor, scaled from 0-255 to 0-1.
# NOTE(review): assumes 28x28 grayscale MNIST images, i.e. shape (N, 28, 28) — confirm.
training_3_file_paths = (file_directory/'train'/'3').ls().sorted()
training_3_tensors = [tensor(Image.open(file_path)) for file_path in training_3_file_paths]
training_3_tensors = torch.stack(training_3_tensors).float() / 255

# Same for the '7' images; stacking/normalization presumably follows below this view.
training_7_file_paths = (file_directory/'train'/'7').ls().sorted()
training_7_tensors = [tensor(Image.open(file_path)) for file_path in training_7_file_paths]
# sets user to default user for web server
user www-data;
# sets number of cpu cores to use
worker_processes auto;
# customizes how to handle connections
events {
# sets number of connections to use per cpu core
// After Effects expression (Transform > Position > Code):
// measure the bounding box of the "Subtitle" text layer in the "Top" comp.
var text_composition = comp("Top");
var text_layer = text_composition.layer("Subtitle");
// sourceRectAtTime() gives the layer's bounding rectangle at the current time.
var text_rectangle = text_layer.sourceRectAtTime();
// Unpack the rect once so later position math can use plain numbers.
var text_left = text_rectangle.left;
var text_top = text_rectangle.top;
var text_width = text_rectangle.width;
var text_height = text_rectangle.height;