Skip to content

Instantly share code, notes, and snippets.

@vashineyu
Created February 20, 2019 06:24
Show Gist options
  • Save vashineyu/028df1574c9250b48698edb9fe96825f to your computer and use it in GitHub Desktop.
def get_large_image(slide_handler, dpl_list, sz=256):
    """Read one large contiguous slide region covering a coordinate chunk.

    Reading each patch through openslide individually interacts with the
    openslide handle too frequently and was observed to core-dump, so this
    reads the covered area as horizontal strips and stitches them into a
    single array that can later be split with plain numpy operations.

    Args:
        slide_handler: object exposing ``current_slide.read_region``
            (SLIDE_OPENER wrapping an openslide object).
        dpl_list: list of (x, y) coordinate tuples.
        sz: patch size in pixels.

    Returns:
        numpy array (RGB, alpha dropped) of the partial WSI at level 0,
        roughly 1/n_gpus of the whole slide.
    """
    xs, ys = zip(*dpl_list)
    x0 = min(xs)
    strip_w = max(xs) - x0 + sz
    # NOTE(review): the y origin of each strip is simply row * sz, i.e. this
    # assumes min(ys) == 0 — confirm against the callers' coordinate lists.
    n_strips = (max(ys) + sz) // sz
    strips = []
    for row in range(n_strips):
        region = slide_handler.current_slide.read_region(
            level=0,
            location=(x0, sz * row),
            size=(strip_w, sz))
        strips.append(np.array(region)[:, :, :3])  # keep RGB, drop alpha
    return np.concatenate(strips, axis=0)
def get_array_for_inference(large_image, sz=256):
    """Cut a stitched image into square patches.

    Patches are emitted column-major (every patch row of the first patch
    column, then the next column, ...), which matches the ordering of the
    coordinate list built from itertools.product(ww, hh).

    Args:
        large_image: image array from ``get_large_image`` (h x w x c).
        sz: side length of one square patch.

    Returns:
        list of patch arrays, each sz x sz x c (n x w x h x c overall).
    """
    height, width, _ = large_image.shape
    n_cols = width // sz
    n_rows = height // sz
    return [
        large_image[r * sz:(r + 1) * sz, c * sz:(c + 1) * sz, :]
        for c in range(n_cols)
        for r in range(n_rows)
    ]
def prepare_images_for_inference(slide_handler, dpl_list, target_queue, sz=256):
    """Thread target: build the patches for one coordinate chunk and enqueue them.

    Combines ``get_large_image`` and ``get_array_for_inference`` and puts the
    result onto the shared queue as ``[patch_list, coordinate_list]``.

    Args:
        slide_handler: SLIDE_OPENER object.
        dpl_list: list of (x, y) coordinates handled by this worker.
        target_queue: queue.Queue receiving [patches, dpl_list].
        sz: patch size in pixels.
    """
    stitched = get_large_image(slide_handler=slide_handler,
                               dpl_list=dpl_list,
                               sz=sz)
    patches = get_array_for_inference(stitched, sz=sz)
    target_queue.put([patches, dpl_list])
def main():
    """
    pass some codes
    """
    # Tile origins covering the level-0 slide dimensions in steps of sz.
    ww = list(range(0, sli.current_slide.level_dimensions[0][0], sz))
    hh = list(range(0, sli.current_slide.level_dimensions[0][1], sz))
    # product(ww, hh): column-major order, matching get_array_for_inference.
    lst = list(itertools.product(ww, hh))

    # Split the coordinate list into n_gpus chunks of whole columns;
    # the final chunk also absorbs the remainder.
    per_gpu_get = (len(ww) // n_gpus) * len(hh)
    deploy_list = []
    for i in range(n_gpus):
        if i != (n_gpus - 1):
            deploy_list.append(lst[i * per_gpu_get:(i + 1) * per_gpu_get])
        else:
            deploy_list.append(lst[i * per_gpu_get:])

    # One preparation thread per chunk, all feeding a single shared queue.
    # (Removed an unused list of threading.Event objects that was created
    # but never set or waited on.)
    im_queue = queue.Queue()
    preparing_work = [
        threading.Thread(target=prepare_images_for_inference,
                         args=(sli, dpl, im_queue))
        for dpl in deploy_list
    ]
    for t in preparing_work:
        t.start()
    for t in preparing_work:
        t.join()
    print("Split done")

    # Every worker has joined, so qsize() is stable and drains the queue fully.
    result = [im_queue.get() for _ in range(im_queue.qsize())]
    im_for_inference, deploy_list = zip(*result)
    """
    pass some codes
    """
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment