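"""Save grids of randomly augmented slices of the TorchIO FPG T1 image.

Each of the six iterations writes one PNG to /tmp/transformed: the first
contains a single rescaled slice; each subsequent one contains four times as
many randomly transformed slices, extracted after halving the image
resolution (with anti-aliasing), so the tiles shrink as the grid grows.
"""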
from pathlib import Path

import numpy as np
import torch
import torchio as tio
import torchvision
from PIL import Image
from tqdm import trange
# Directory where the output PNG grids are written.
output_dir = Path('/tmp/transformed')
output_dir.mkdir(exist_ok=True)

# Load the T1-weighted MRI from the TorchIO FPG dataset, reorient it to
# canonical (RAS+) orientation and resample it to 0.5 mm isotropic spacing.
image = tio.datasets.FPG().t1
image = tio.ToCanonical()(image)
image = tio.Resample(0.5)(image)

# Rescale intensities to [0, 255], clipping at the 1st and 99th percentiles.
rescale = tio.RescaleIntensity((0, 255), (1, 99))
# Augmentation pipeline: one spatial transform, then several MRI artifacts
# applied with low probability, and finally intensity rescaling. The values
# in the OneOf dict are relative sampling weights (affine is sampled four
# times as often as elastic deformation).
composed_transform = tio.Compose((
    tio.OneOf({
        tio.RandomElasticDeformation(): 2,
        tio.RandomAffine(): 8,
    }),
    tio.RandomAnisotropy(p=0.2),
    tio.RandomBiasField(p=0.2),
    tio.RandomBlur(p=0.1),
    tio.RandomGamma(p=0.1),
    tio.RandomNoise(p=0.1),
    tio.OneOf(
        (
            tio.RandomGhosting(),
            tio.RandomMotion(),
            tio.RandomSpike(),
        ),
        p=0.1,
    ),
    rescale,
))
spacing = image.spacing[0]
rows = 1
for i in trange(6):
    # First frame: intensity rescaling only. Later frames: full augmentation.
    transform = rescale if i == 0 else composed_transform
    num_images = rows ** 2
    images = []
    for _ in trange(num_images, leave=False):
        transformed = transform(image)
        si, sj, sk = transformed.spatial_shape
        # Take the middle slice along the first spatial axis, rotate it for
        # display, and add a channel dimension so make_grid treats it as a
        # grayscale image.
        middle_slice = transformed.numpy()[0, si // 2, :, :].astype(np.uint8)
        middle_slice = np.rot90(middle_slice)[np.newaxis]
        images.append(torch.from_numpy(middle_slice.copy()))
    tensor = torch.stack(images)
    grid = torchvision.utils.make_grid(
        tensor,
        nrow=int(np.sqrt(num_images)),
        padding=0,
    )
    name = f'{i}.png'
    path = output_dir / name
    Image.fromarray(grid.permute(1, 2, 0).numpy()).save(path)
    # Prepare the next (coarser) level: blur for anti-aliasing, resample to
    # twice the current spacing, and quadruple the number of grid tiles.
    old_spacing = image.spacing[0]
    spacing = 2 * spacing
    downsample = tio.Compose((
        tio.Blur(tio.Resample.get_sigma(2, old_spacing)),
        tio.Resample(spacing),
    ))
    image = downsample(image)
    rows *= 2
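
# Optional follow-up (not part of the original script): a minimal sketch of
# how the saved frames 0.png ... 5.png could be assembled into an animated
# GIF with Pillow. The output filename and frame duration are assumptions.
#
# frames = [Image.open(output_dir / f'{i}.png') for i in range(6)]
# frames[0].save(
#     output_dir / 'transformed.gif',
#     save_all=True,
#     append_images=frames[1:],
#     duration=500,  # milliseconds per frame (arbitrary choice)
#     loop=0,
# )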