#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 19:26:32 2018
@author: ptrblck
"""
import torch
import numpy as np
import time
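
# run_test: check that torch.unique and np.unique agree on values, inverse
# indices, and counts for every dim of x, and for the flattened no-dim case,
# across the supported return_inverse/return_counts combinations. `n` is
# expected to be the numpy counterpart of `x` (i.e. x.cpu().numpy()).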
def run_test(x, n):
    for dim in range(x.dim()):
        print('##return_inverse=True, return_counts=True##')
        tu, ti, tc = torch.unique(
            x,
            return_inverse=True,
            return_counts=True,
            dim=dim)
        nu, ni, nc = np.unique(
            n,
            return_inverse=True,
            return_counts=True,
            axis=dim)
        print('Dim{}: unique values passed: {}'.format(
            dim, torch.equal(tu.cpu(), torch.from_numpy(nu))))
        print('Dim{}: unique rev indices passed: {}'.format(
            dim, torch.equal(ti.cpu(), torch.from_numpy(ni))))
        print('Dim{}: unique counts passed: {}'.format(
            dim, torch.equal(tc.cpu(), torch.from_numpy(nc))))
        print('Dim{}: unique restore passed: {}'.format(
            dim, torch.equal(tu.index_select(dim, ti), x)))

        print('##return_inverse=True, return_counts=False##')
        tu, ti = torch.unique(
            x,
            return_inverse=True,
            return_counts=False,
            dim=dim)
        nu, ni = np.unique(
            n,
            return_inverse=True,
            return_counts=False,
            axis=dim)
        print('Dim{}: unique values passed: {}'.format(
            dim, torch.equal(tu.cpu(), torch.from_numpy(nu))))
        print('Dim{}: unique rev indices passed: {}'.format(
            dim, torch.equal(ti.cpu(), torch.from_numpy(ni))))
        print('Dim{}: unique restore passed: {}'.format(
            dim, torch.equal(tu.index_select(dim, ti), x)))

        print('##return_inverse=False, return_counts=True##')
        tu, tc = torch.unique(
            x,
            return_inverse=False,
            return_counts=True,
            dim=dim)
        nu, nc = np.unique(
            n,
            return_inverse=False,
            return_counts=True,
            axis=dim)
        print('Dim{}: unique values passed: {}'.format(
            dim, torch.equal(tu.cpu(), torch.from_numpy(nu))))
        print('Dim{}: unique counts passed: {}'.format(
            dim, torch.equal(tc.cpu(), torch.from_numpy(nc))))

    # Run test without dim argument
    print('##return_inverse=True, return_counts=True##')
    tu, ti, tc = torch.unique(
        x,
        sorted=True,  # otherwise numpy results won't match
        return_inverse=True,
        return_counts=True)
    nu, ni, nc = np.unique(
        n,
        return_inverse=True,
        return_counts=True)
    print('NoDim: unique values passed: {}'.format(
        torch.equal(tu.cpu(), torch.from_numpy(nu))))
    print('NoDim: unique rev indices passed: {}'.format(
        torch.equal(ti.cpu().view(-1), torch.from_numpy(ni))))
    print('NoDim: unique counts passed: {}'.format(
        torch.equal(tc.cpu(), torch.from_numpy(nc))))

    print('##return_inverse=True, return_counts=False##')
    tu, ti = torch.unique(
        x,
        sorted=True,  # otherwise numpy results won't match
        return_inverse=True,
        return_counts=False)
    nu, ni = np.unique(
        n,
        return_inverse=True,
        return_counts=False)
    print('NoDim: unique values passed: {}'.format(
        torch.equal(tu.cpu(), torch.from_numpy(nu))))
    print('NoDim: unique rev indices passed: {}'.format(
        torch.equal(ti.cpu().view(-1), torch.from_numpy(ni))))
    print('##return_inverse=False, return_counts=True##')
    # Unpack the counts here (the original unpacked `tu, ti` / `nu, ni`, so
    # the checks below silently compared stale tc/nc from the previous run).
    tu, tc = torch.unique(
        x,
        sorted=True,  # otherwise numpy results won't match
        return_inverse=False,
        return_counts=True)
    nu, nc = np.unique(
        n,
        return_inverse=False,
        return_counts=True)
    print('NoDim: unique values passed: {}'.format(
        torch.equal(tu.cpu(), torch.from_numpy(nu))))
    print('NoDim: unique counts passed: {}'.format(
        torch.equal(tc.cpu(), torch.from_numpy(nc))))
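
# time_cpu: average wall-clock time of torch.unique vs. np.unique over
# `iterations` runs for every dim, measured with time.perf_counter.
# Returns one dict per library, keyed 'dim0', 'dim1', ...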
def time_cpu(x, n, return_inverse=True, return_counts=True, iterations=100):
    torch_times = {}
    for dim in range(x.dim()):
        torch_tmp = []  # reset per dim so the mean only covers this dim
        for _ in range(iterations):
            t0 = time.perf_counter()
            torch.unique(
                x,
                return_inverse=return_inverse,
                return_counts=return_counts,
                dim=dim)
            t1 = time.perf_counter()
            torch_tmp.append(t1 - t0)
        torch_times['dim' + str(dim)] = np.mean(torch_tmp)
    numpy_times = {}
    for dim in range(x.dim()):
        numpy_tmp = []
        for _ in range(iterations):
            t0 = time.perf_counter()
            np.unique(
                n,
                return_inverse=return_inverse,
                return_counts=return_counts,
                axis=dim)
            t1 = time.perf_counter()
            numpy_tmp.append(t1 - t0)
        numpy_times['dim' + str(dim)] = np.mean(numpy_tmp)
    return torch_times, numpy_times
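
# time_gpu: the same measurement for a CUDA tensor. CUDA kernels launch
# asynchronously, so torch.cuda.synchronize() is needed before reading each
# timestamp; without it, t1 - t0 would mostly measure kernel launch overhead.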
def time_gpu(x, n, return_inverse=True, return_counts=True, iterations=100):
    torch.cuda.synchronize()  # drain pending GPU work before timing starts
    torch_times = {}
    for dim in range(x.dim()):
        torch_tmp = []  # reset per dim so the mean only covers this dim
        for _ in range(iterations):
            torch.cuda.synchronize()
            t0 = time.perf_counter()
            torch.unique(
                x,
                return_inverse=return_inverse,
                return_counts=return_counts,
                dim=dim)
            torch.cuda.synchronize()  # wait for the kernel to finish
            t1 = time.perf_counter()
            torch_tmp.append(t1 - t0)
        torch_times['dim' + str(dim)] = np.mean(torch_tmp)
    numpy_times = {}
    for dim in range(x.dim()):
        numpy_tmp = []
        for _ in range(iterations):
            t0 = time.perf_counter()
            np.unique(
                n,
                return_inverse=return_inverse,
                return_counts=return_counts,
                axis=dim)
            t1 = time.perf_counter()
            numpy_tmp.append(t1 - t0)
        numpy_times['dim' + str(dim)] = np.mean(numpy_tmp)
    return torch_times, numpy_times
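
# Correctness checks below: randint(2, ...) yields {0, 1} values, so every
# dim contains many duplicate slices; the randn tensor covers float inputs.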
# Test results with numpy
x = torch.randint(2, (100, 100, 10, 10))
n = x.numpy()
run_test(x, n)

x = torch.randint(2, (1000, 1000))
n = x.numpy()
run_test(x, n)

x = torch.randn(1000, 1000)
n = x.numpy()
run_test(x, n)

# Time CPU version
x = torch.randint(2, (1000, 1000))
n = x.numpy()
pt_time_cpu, np_time_cpu = time_cpu(
    x,
    n,
    return_inverse=False,
    return_counts=False)
print('PyTorch, return_inverse=False, return_counts=False, CPU\n', pt_time_cpu)
print('Numpy, return_inverse=False, return_counts=False\n', np_time_cpu)

pt_time_ret_cpu, np_time_ret_cpu = time_cpu(
    x,
    n,
    return_inverse=True,
    return_counts=True)
print('PyTorch, return_inverse=True, return_counts=True, CPU\n', pt_time_ret_cpu)
print('Numpy, return_inverse=True, return_counts=True\n', np_time_ret_cpu)
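
# Repeat the correctness checks and timing on the GPU; np.unique always runs
# on the CPU, so the numpy numbers serve as a CPU baseline.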
if torch.cuda.is_available():
    device = 'cuda:0'

    # Test results on GPU with numpy
    x = torch.randint(2, (100, 100, 10, 10), device=device)
    n = x.cpu().numpy()
    run_test(x, n)

    x = torch.randint(2, (1000, 1000), device=device)
    n = x.cpu().numpy()
    run_test(x, n)

    x = torch.randn(1000, 1000, device=device)
    n = x.cpu().numpy()
    run_test(x, n)

    # Time GPU version
    x = torch.randint(2, (1000, 1000), device=device)
    n = x.cpu().numpy()
    pt_time_gpu, np_time_cpu = time_gpu(
        x,
        n,
        return_inverse=False,
        return_counts=False)
    print('PyTorch, return_inverse=False, return_counts=False, GPU\n', pt_time_gpu)
    print('Numpy, return_inverse=False, return_counts=False\n', np_time_cpu)

    pt_time_ret_gpu, np_time_ret_cpu = time_gpu(
        x,
        n,
        return_inverse=True,
        return_counts=True)
    print('PyTorch, return_inverse=True, return_counts=True, GPU\n', pt_time_ret_gpu)
    print('Numpy, return_inverse=True, return_counts=True\n', np_time_ret_cpu)

    pt_time_ret_gpu, np_time_ret_cpu = time_gpu(
        x,
        n,
        return_inverse=True,
        return_counts=False)
    print('PyTorch, return_inverse=True, return_counts=False, GPU\n', pt_time_ret_gpu)
    print('Numpy, return_inverse=True, return_counts=False\n', np_time_ret_cpu)

    pt_time_ret_gpu, np_time_ret_cpu = time_gpu(
        x,
        n,
        return_inverse=False,
        return_counts=True)
    print('PyTorch, return_inverse=False, return_counts=True, GPU\n', pt_time_ret_gpu)
    print('Numpy, return_inverse=False, return_counts=True\n', np_time_ret_cpu)
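
# A possible follow-up, not part of the original gist: summarize the timing
# dicts as per-dim speedup factors. `speedup` is a hypothetical helper.
def speedup(torch_times, numpy_times):
    # ratio > 1.0 means np.unique took longer than torch.unique on that dim
    return {d: numpy_times[d] / torch_times[d] for d in torch_times}

print('CPU speedup (numpy / torch), return_inverse=True, return_counts=True:\n',
      speedup(pt_time_ret_cpu, np_time_ret_cpu))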