
SeanYu (vashineyu)

  • Taipei, Taiwan
@vashineyu
vashineyu / bin2img.py
Last active October 4, 2018 06:51
special image reader, one-time usage
import numpy as np

def bin2img_mix(file_path, channel_first=False):
    """Read a 6-value float64 affine followed by two 50x70x70 float32 volumes from a raw binary file."""
    w, h = 70, 70
    with open(file_path, 'rb') as f:
        affine_info = np.fromfile(f, dtype='float64', count=6)
        img1 = np.fromfile(f, dtype='single', count=245000)  # 50 * 70 * 70 values
        img2 = np.fromfile(f, dtype='single', count=245000)
    img2[img2 < -900] = -900  # clamp values below -900 in the second volume
    if channel_first:
        return img1.reshape(-1, w, h), img2.reshape(-1, w, h), affine_info
    # channel-last fallback (assumed; the original preview is truncated at the branch above)
    return (img1.reshape(-1, w, h).transpose(1, 2, 0),
            img2.reshape(-1, w, h).transpose(1, 2, 0),
            affine_info)
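A quick usage sketch; the file path and the synthetic contents below are made up purely to show the expected binary layout (6 float64 affine values followed by two blocks of 245,000 float32 values):

import numpy as np

# write a synthetic file matching the expected layout (path and values are hypothetical)
affine = np.arange(6, dtype='float64')
vol1 = np.random.rand(245000).astype('single')
vol2 = np.random.rand(245000).astype('single')
with open('/tmp/example.bin', 'wb') as f:
    f.write(affine.tobytes())
    f.write(vol1.tobytes())
    f.write(vol2.tobytes())

img1, img2, affine_info = bin2img_mix('/tmp/example.bin', channel_first=True)
print(img1.shape, img2.shape, affine_info.shape)  # (50, 70, 70) (50, 70, 70) (6,)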
#! /usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tf_parameter_mgr
import monitor_cb
from tensorflow.python.framework import ops
#from monitor_cb import CMonitor
class GroupNorm(Layer):
    '''Group normalization layer

    Group Normalization divides the channels into groups and computes within each group
    the mean and variance for normalization. GN's computation is independent of batch sizes,
    and its accuracy is stable over a wide range of batch sizes.

    # Arguments
        groups: Integer, the number of groups for Group Normalization.
        axis: Integer, the axis that should be normalized
            (typically the features axis).
            For instance, after a `Conv2D` layer with
            `data_format="channels_first"`, set `axis=1`.
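The preview stops partway through the docstring. As a complement, here is a small NumPy reference of the computation the docstring describes; the group count, epsilon, and NHWC layout are illustrative assumptions, and no learned scale/shift is included:

import numpy as np

def group_norm_reference(x, groups=8, eps=1e-5):
    """Reference group normalization for an NHWC tensor (no learned scale/shift)."""
    n, h, w, c = x.shape
    assert c % groups == 0, "channel count must be divisible by the number of groups"
    xg = x.reshape(n, h, w, groups, c // groups)
    # statistics are computed per sample and per group, so they do not depend on the batch size
    mean = xg.mean(axis=(1, 2, 4), keepdims=True)
    var = xg.var(axis=(1, 2, 4), keepdims=True)
    xg = (xg - mean) / np.sqrt(var + eps)
    return xg.reshape(n, h, w, c)

x = np.random.randn(2, 16, 16, 32).astype('float32')
print(group_norm_reference(x).shape)  # (2, 16, 16, 32)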
import numpy as np
import os
import pickle as pkl
from tqdm import tqdm
"""
Write data into GPFS
"""
NUM_WRITE_PKL = 100
DATA_PATH = "/mnt/work/debug_data/"
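The rest of the script is not shown in the preview. A hedged sketch of what a chunked pickle writer driven by NUM_WRITE_PKL and DATA_PATH might look like; the array shapes and file naming are assumptions, not the gist's actual code:

import os
import pickle as pkl

import numpy as np
from tqdm import tqdm

NUM_WRITE_PKL = 100          # number of pickle files to write
DATA_PATH = "/mnt/work/debug_data/"

os.makedirs(DATA_PATH, exist_ok=True)
for i in tqdm(range(NUM_WRITE_PKL)):
    # dummy payload; a real run would dump actual training arrays instead
    batch = {"x": np.random.rand(64, 224, 224, 3).astype("float32"),
             "y": np.random.randint(0, 10, size=64)}
    with open(os.path.join(DATA_PATH, "part_{:04d}.pkl".format(i)), "wb") as f:
        pkl.dump(batch, f, protocol=pkl.HIGHEST_PROTOCOL)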
@vashineyu
vashineyu / model.py
Created February 13, 2019 15:05
model.py
# model.py
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
import tensorflow.nn as F
class Conv_bn_relu(layers.Layer):
"""Stack blocks of Conv2D->BN->relu.
# model.py
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
import tensorflow.nn as F
class Conv_bn_relu(models.Model):
"""Stack blocks of Conv2D->BN->relu.
"""Data generator for model
GetDataset: Get single data with next
Customized dataloader: Compose multiple dataset object together and put them into multi-processing flow
ver1. all patches were taken from single slide util N patches have been taken.
"""
import cv2
import os
import json
def get_large_image(slide_handler, dpl_list, sz=256):
    """Given a partial coordinate list, read the covered region at once and return it as a large image.

    This is a modification of the previous version, which dynamically generated patches from the
    slide and predicted on them. That approach interacted with the openslide object too frequently,
    which caused core dumps. In this version we read the slide region as a whole and then split it
    with ordinary array operations.

    Args:
        slide_handler: openslide object (SLIDE_OPENER object)
        dpl_list: list of coordinate tuples
        sz: patch size
    Return:
        large image: numpy array of a partial WSI at high resolution (roughly 1/n_gpus of the WSI's patches)
    """
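The function body is not shown in the preview. A hedged sketch of the read-once-then-split idea, assuming slide_handler behaves like an openslide.OpenSlide object; the bounding-box arithmetic, the (x, y) coordinate convention, and the level-0 read are illustrative assumptions:

import numpy as np

def get_large_image_sketch(slide_handler, dpl_list, sz=256):
    """Read one bounding region that covers every coordinate, then crop patches from the array."""
    xs = [x for x, y in dpl_list]
    ys = [y for x, y in dpl_list]
    x0, y0 = min(xs), min(ys)
    width = max(xs) - x0 + sz
    height = max(ys) - y0 + sz
    # a single read_region call instead of one openslide interaction per patch
    region = slide_handler.read_region((x0, y0), 0, (width, height)).convert("RGB")
    large_image = np.array(region)
    patches = [large_image[y - y0:y - y0 + sz, x - x0:x - x0 + sz] for x, y in dpl_list]
    return large_image, patches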
class GetDataset():
    """Declare a Dataset object for inference.

    Args:
        slide_name (str): full path to the slide
        f_inputs_preproc (function): preprocessing function applied to each patch array
        patch_size (int): patch size of the returned array
        stride (int): stride used when extracting patches
        level (int): slide level to read patches from
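The class body is cut off here. To make the pieces above concrete, below is a hedged sketch of a GetDataset-style iterator plus a queue-based composition of several of them into a multiprocessing flow; the openslide calls, the __next__ protocol, the grid scan, and the sentinel-based queue are assumptions based on the docstrings, not the gist's actual implementation:

import multiprocessing as mp

import numpy as np
import openslide  # assumed; the gist wraps slides in its own SLIDE_OPENER object


class GetDatasetSketch:
    """Yield one preprocessed patch per next() call, scanning a slide in a regular grid."""

    def __init__(self, slide_name, f_inputs_preproc, patch_size=256, stride=256, level=0):
        self.slide = openslide.OpenSlide(slide_name)
        self.preproc = f_inputs_preproc
        self.patch_size = patch_size
        self.level = level
        # note: read_region expects level-0 coordinates; this sketch assumes level=0
        w, h = self.slide.level_dimensions[level]
        self.coords = [(x, y)
                       for y in range(0, h - patch_size + 1, stride)
                       for x in range(0, w - patch_size + 1, stride)]
        self.idx = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.idx >= len(self.coords):
            raise StopIteration
        x, y = self.coords[self.idx]
        self.idx += 1
        patch = self.slide.read_region((x, y), self.level,
                                       (self.patch_size, self.patch_size)).convert("RGB")
        return self.preproc(np.array(patch)), (x, y)


def _worker(slide_name, f_inputs_preproc, queue):
    """Each process builds its own dataset (slide handles are not picklable) and drains it."""
    for item in GetDatasetSketch(slide_name, f_inputs_preproc):
        queue.put(item)
    queue.put(None)  # sentinel: this slide is exhausted


def compose_datasets(slide_names, f_inputs_preproc, max_queue=64):
    """Compose several per-slide datasets into a single multiprocessing flow."""
    queue = mp.Queue(maxsize=max_queue)
    workers = [mp.Process(target=_worker, args=(name, f_inputs_preproc, queue))
               for name in slide_names]
    for p in workers:
        p.start()
    finished = 0
    while finished < len(workers):
        item = queue.get()
        if item is None:
            finished += 1
        else:
            yield item
    for p in workers:
        p.join()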