Faruk Cankaya (farukcankaya)
@farukcankaya
farukcankaya / mount-s3-bucket.md
Last active January 9, 2022 12:12
Mount S3 bucket to EC2 instance via S3fs using FUSE

Mount S3 Bucket to AWS EC2 Instance

  1. [Connect][ec2-connect] to the EC2 instance.
  2. Update the package index so the package manager can find s3fs: sudo apt-get update
  3. Install s3fs and awscli: sudo apt install s3fs awscli -y, then verify the install:
    which s3fs
    /usr/bin/s3fs
  4. Set up access (if you do NOT have credentials, please check the [Give permission to EC2 to access S3][permission] section below); a sketch of the typical credential setup follows this list:
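The gist preview is cut off at this step. As a rough sketch of what the credential setup and mount typically look like (these are not the gist's own commands; the bucket name, keys, and mount point below are placeholders), the same flow scripted in Python:

import os
import subprocess

BUCKET = "my-bucket"            # placeholder
MOUNT_POINT = "/mnt/s3-bucket"  # placeholder
ACCESS_KEY = "AKIA..."          # placeholder
SECRET_KEY = "..."              # placeholder

# s3fs reads credentials from a passwd file in ACCESS_KEY:SECRET_KEY format
# and refuses files that other users can read, hence chmod 600.
passwd_file = os.path.expanduser("~/.passwd-s3fs")
with open(passwd_file, "w") as f:
    f.write(f"{ACCESS_KEY}:{SECRET_KEY}\n")
os.chmod(passwd_file, 0o600)

# Create the mount point and mount the bucket through FUSE via s3fs.
os.makedirs(MOUNT_POINT, exist_ok=True)
subprocess.run(["s3fs", BUCKET, MOUNT_POINT, "-o", f"passwd_file={passwd_file}"], check=True)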
@farukcankaya
farukcankaya / ImageConverter.js
Last active April 18, 2022 09:29
Using a custom/complex ImageMagick command with GraphicsMagick for Node
"use strict";
const ImageData = require("./ImageData");
const ImageConverterOperator = require("./ImageConverterOperator");
const gm = require("gm").subClass({imageMagick: true});
/**
* Get the usable memory size for ImageMagick.
* Typically we use 90% of the maximum memory size.
* @see https://docs.aws.amazon.com/lambda/latest/dg/lambda-environment-variables.html
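*/
// Assumed completion (the gist preview is truncated here): take ~90% of the
// Lambda memory limit, which Lambda exposes in MB via the
// AWS_LAMBDA_FUNCTION_MEMORY_SIZE environment variable.
const getUsableMemoryMb = () =>
    Math.floor(0.9 * parseInt(process.env.AWS_LAMBDA_FUNCTION_MEMORY_SIZE || "128", 10));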
@farukcankaya
farukcankaya / check_consumers.sh
Created May 31, 2022 16:57
Check the number of consumers in a given consumer group with the CLI
#!/bin/bash
# Usage
# Make install_kafka.sh and check_consumers.sh executable via `chmod +x _file_`.
# Then, run the script:
# ./check_consumers.sh -g 'consumer.group.retry' -b 'localhost:9092,localhost:9093'
while getopts g:b: flag
do
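    case "${flag}" in
        g) group=${OPTARG};;
        b) brokers=${OPTARG};;
    esac
done

# Assumed completion (the gist preview is truncated here): describe the group's
# active members with the stock Kafka CLI and count one output line per consumer.
kafka-consumer-groups.sh --bootstrap-server "$brokers" \
    --describe --group "$group" --members | grep -c "$group"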
def prepare_validation_loader(cfg):
    # Point DATASETS.TRAIN at the TEST split so build_detection_train_loader
    # yields validation batches in training format (i.e. with ground truth),
    # which lets the model return a loss dict on validation data.
    val_cfg = cfg.clone()
    val_cfg.defrost()
    val_cfg.DATASETS.TRAIN = val_cfg.DATASETS.TEST
    val_cfg.freeze()
    val_mapper = DatasetMapper(val_cfg, True, augmentations=[])
    return iter(build_detection_train_loader(val_cfg, mapper=val_mapper))
def calculate_validation_loss(validation_dataset_loader, model, storage):
    data = next(validation_dataset_loader)
    with torch.no_grad():
        loss_dict = model(data)
        losses = sum(loss_dict.values())
        assert torch.isfinite(losses).all(), loss_dict
        loss_dict_reduced = {"val_" + k: v.item() for k, v in
                             comm.reduce_dict(loss_dict).items()}
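        # Assumed completion (the gist preview is truncated here): log the
        # reduced losses and their sum to the EventStorage on the main process.
        total_val_loss = sum(loss_dict_reduced.values())
        if comm.is_main_process():
            storage.put_scalars(total_val_loss=total_val_loss, **loss_dict_reduced)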
# Only the statements commented with 'ADDED NEW' are added for the validation-loss calculation.
validation_data_loader = prepare_validation_loader(cfg)  # ADDED NEW
logger.info("Starting training from iteration {}".format(start_iter))
with EventStorage(start_iter) as storage:
    for data, iteration in zip(data_loader, range(start_iter, max_iter)):
        storage.iter = iteration
        loss_dict = model(data)
        losses = sum(loss_dict.values())
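        # Assumed continuation (the gist preview is truncated here): reuse the
        # validation loader to log a validation loss next to the training loss.
        calculate_validation_loss(validation_data_loader, model, storage)  # ADDED NEW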
logger.info("Starting training from iteration {}".format(start_iter))
with EventStorage(start_iter) as storage:
previous_val_map = 0 # ADDED NEW
no_improvement_for_epochs = 0 # ADDED NEW
for data, iteration in zip(data_loader, range(start_iter, max_iter)):
storage.iter = iteration
...
if (
cfg.TEST.EVAL_PERIOD > 0
and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
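        ):
            # Assumed continuation (the gist preview is truncated here): run the
            # periodic evaluation, track the best validation mAP seen so far, and
            # stop once it has not improved for PATIENCE evaluations. The metric
            # key and PATIENCE are placeholders, not the gist's own names.
            results = do_test(cfg, model)
            val_map = results["segm"]["AP"]
            if val_map > previous_val_map:  # ADDED NEW
                previous_val_map = val_map
                no_improvement_for_epochs = 0
            else:
                no_improvement_for_epochs += 1
            if no_improvement_for_epochs >= PATIENCE:
                logger.info("No improvement for {} evaluations, stopping early".format(PATIENCE))
                break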
@ROI_MASK_HEAD_REGISTRY.register()
class CustomMaskRCNNConvUpsampleHead(MaskRCNNConvUpsampleHead):
    @configurable
    def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm="", **kwargs):
        super().__init__(input_shape, num_classes=num_classes, conv_dims=conv_dims, conv_norm=conv_norm, **kwargs)
        self.vis_period = 0
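Registering the head is only half the wiring; Detectron2 still has to be told to build it. A minimal sketch, assuming a stock model-zoo config:

from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
# Detectron2 resolves this name through ROI_MASK_HEAD_REGISTRY when it builds the ROI heads.
cfg.MODEL.ROI_MASK_HEAD.NAME = "CustomMaskRCNNConvUpsampleHead"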
import copy
import logging
import numpy as np
import torch
from PIL import Image, ImageDraw
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.structures import Instances
from detectron2.utils import comm
def do_train_visualization(visualizer, model, inputs):
    # Temporarily switch to eval mode so the model returns predictions,
    # visualize them, then restore the previous training/eval mode.
    training_mode = model.training
    model.eval()
    outputs = visualizer.inference(model, inputs)
    visualizer.process(inputs, outputs)
    model.train(training_mode)
with EventStorage(start_iter) as storage:
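    # Assumed continuation (the gist preview is truncated here): call the helper
    # every `vis_period` iterations; `visualizer` and `vis_period` are
    # placeholders for objects set up earlier in the gist.
    for data, iteration in zip(data_loader, range(start_iter, max_iter)):
        storage.iter = iteration
        ...
        if vis_period > 0 and (iteration + 1) % vis_period == 0:
            do_train_visualization(visualizer, model, data)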