
🔨 Fixing things

Peter Goldsborough (goldsborough)

@goldsborough
goldsborough / download_mnist.py
Created Jan 8, 2019
Python script to download the MNIST dataset
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import os
import sys
import urllib
try:
minimal.cpp
#include <torch/torch.h>
torch::nn::Linear model(num_features, 1);
torch::optim::SGD optimizer(model->parameters());
auto data_loader = torch::data::data_loader(dataset);
for (size_t epoch = 0; epoch < 10; ++epoch) {
  for (auto batch : data_loader) {
    auto prediction = model->forward(batch.data);
    auto loss = loss_function(prediction, batch.target);
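The preview cuts off inside the inner loop. Against a current LibTorch release, a complete minimal loop might look roughly like the sketch below; the MNIST dataset path, batch size, learning rate, and the use of make_data_loader and mse_loss are illustrative assumptions, not code from the gist.

#include <torch/torch.h>

int main() {
  // Toy dataset: MNIST images, stacked into batched tensors (the path is an assumption).
  auto dataset = torch::data::datasets::MNIST("./mnist")
                     .map(torch::data::transforms::Stack<>());
  auto data_loader =
      torch::data::make_data_loader(std::move(dataset), /*batch_size=*/64);

  torch::nn::Linear model(/*in_features=*/784, /*out_features=*/1);
  torch::optim::SGD optimizer(model->parameters(), /*lr=*/0.01);

  for (size_t epoch = 0; epoch < 10; ++epoch) {
    for (auto& batch : *data_loader) {
      optimizer.zero_grad();
      // Flatten the images to [batch, 784] before the linear layer.
      auto prediction = model->forward(batch.data.view({batch.data.size(0), -1}));
      // mse_loss is only a runnable stand-in for the unspecified loss_function.
      auto loss = torch::mse_loss(
          prediction, batch.target.to(torch::kFloat).unsqueeze(1));
      loss.backward();
      optimizer.step();
    }
  }
}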
gist:265a13be401578611d56a9cd4cdf12f6
- Old-style factory functions that accept a type as the first argument and a size as the second argument have been removed. New-style factory functions accepting the size as the first argument and [`TensorOptions`](https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/core/TensorOptions.h#L14) as the last argument should be used instead. For example, replace a call like `at::ones(torch::CPU(at::kFloat), {2, 3})` with `torch::ones({2, 3}, at::kCPU)` (see the sketch after this list). This applies to the following functions:
- `arange`
- `empty`
- `eye`
- `full`
- `linspace`
- `logspace`
- `ones`
- `rand`
- `randint`
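A minimal sketch of the new-style calls against a recent LibTorch build; the particular dtype/device combinations are illustrative, not mandated by the release note.

#include <torch/torch.h>
#include <iostream>

int main() {
  // Old style (removed): at::ones(torch::CPU(at::kFloat), {2, 3});
  // New style: sizes first, TensorOptions (or a dtype/device shorthand) last.
  auto a = torch::ones({2, 3}, torch::kCPU);
  auto b = torch::rand({2, 3}, torch::dtype(torch::kFloat64).device(torch::kCPU));
  auto c = torch::randint(/*high=*/10, {4}, torch::kInt64);
  std::cout << a << "\n" << b << "\n" << c << "\n";
}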
View pytorch-download-links.md
sema.cpp
class Semaphore {
 public:
  void post() {
    std::lock_guard<std::mutex> lock(mutex_);
    count_ += 1;
    cv_.notify_one();
  }
  void shutdown() {
    std::lock_guard<std::mutex> lock(mutex_);
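The preview ends inside shutdown(). A complete class along these lines might look like the sketch below; the wait() method and the shutdown_ flag are assumptions about the parts the preview cuts off, not code from the gist.

#include <condition_variable>
#include <mutex>

class Semaphore {
 public:
  void post() {
    std::lock_guard<std::mutex> lock(mutex_);
    count_ += 1;
    cv_.notify_one();
  }

  // Blocks until a count is available; returns false if shutdown() was called
  // and nothing is left to consume.
  bool wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0 || shutdown_; });
    if (count_ == 0) {
      return false;
    }
    count_ -= 1;
    return true;
  }

  void shutdown() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      shutdown_ = true;
    }
    cv_.notify_all();
  }

 private:
  std::size_t count_ = 0;
  bool shutdown_ = false;
  std::mutex mutex_;
  std::condition_variable cv_;
};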
@goldsborough
goldsborough / data_loader.cpp
Created Sep 19, 2018
Stream/Random policy data loader
namespace torch {
namespace data {
template <typename D = torch::Tensor, typename L = torch::Tensor>
struct Example {
  D data;
  L label;
};
template <typename D>
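For context, the Example template simply pairs a data tensor with its label, so an element yielded by such a loader would be consumed roughly like this (the tensor shapes and label value are illustrative assumptions):

// Hypothetical usage of the Example template defined above.
torch::data::Example<> example{torch::randn({1, 28, 28}), torch::tensor(7)};
torch::Tensor input = example.data;   // the input tensor
torch::Tensor target = example.label; // the corresponding label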
dcgan.py
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
dcgan.cpp
#include <torch/torch.h>
#include "mnist_reader.h"
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
#include <vector>
gist:77322103fb42cf4677588f9364367212
benchmark                                          time/iter   iters/s
insert_at_end_unordered_set_small_string(100000      14.41ms     69.42
insert_at_end_sorted_vector_set_small_string(10     353.95ns     2.83M
insert_at_end_sorted_vector_set_small_string(10       7.74us   129.14K
insert_at_end_sorted_vector_set_small_string(10     791.95us     1.26K
insert_at_end_sorted_vector_set_small_string(10      65.09ms     15.36
insert_at_end_sorted_vector_set_small_string(10        6.90s   144.85m
insert_at_end_unordered_set_large_string(100000      27.17ms     36.81
insert_at_end_sorted_vector_set_large_string(10       1.07us   932.14K
insert_at_end_sorted_vector_set_large_string(10      16.40us    60.99K
insert_at_end_sorted_vector_set_large_string(10     507.14us     1.97K
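The numbers appear to compare repeatedly inserting small and large strings into a hash set versus a set backed by a sorted vector, at increasing element counts. A minimal, framework-free sketch of the two strategies being contrasted follows; the key contents, container sizes, and timing harness are all assumptions, and the original presumably used a proper benchmark framework.

#include <algorithm>
#include <chrono>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// Hash set: amortized O(1) per insert, regardless of container size.
void insert_unordered_set(size_t n) {
  std::unordered_set<std::string> set;
  for (size_t i = 0; i < n; ++i) set.insert("key" + std::to_string(i));
}

// Vector kept in sorted order: each insert is a binary search plus an O(n)
// element shift, which is why the sorted-vector numbers degrade at large sizes.
void insert_sorted_vector_set(size_t n) {
  std::vector<std::string> set;
  for (size_t i = 0; i < n; ++i) {
    std::string key = "key" + std::to_string(i);
    auto it = std::lower_bound(set.begin(), set.end(), key);
    if (it == set.end() || *it != key) set.insert(it, std::move(key));
  }
}

int main() {
  for (size_t n : {1000u, 10000u, 100000u}) {
    auto start = std::chrono::steady_clock::now();
    insert_sorted_vector_set(n);
    std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
    std::cout << "sorted_vector_set n=" << n << ": " << elapsed.count() << "s\n";
  }
}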
@goldsborough
goldsborough / any.cpp
Created May 10, 2018
Basic implementation of Any
class Any {
 public:
  template <typename T>
  // Decay T so that lvalue arguments store a value rather than a reference.
  explicit Any(T&& value)
      : content_(new Holder<std::decay_t<T>>(std::forward<T>(value))) {}
  Any(const Any& other) : content_(other.content_->clone()) {}
  Any(Any&& other) { swap(other); }
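The constructor stores the value behind a content_ pointer, and the copy constructor calls clone(), which implies a small type-erasure hierarchy roughly like the sketch below; any names beyond Holder, clone, and content_ are assumptions about the parts the preview cuts off.

#include <memory>
#include <utility>

// Polymorphic base so Any can copy its contents without knowing the type.
struct Placeholder {
  virtual ~Placeholder() = default;
  virtual Placeholder* clone() const = 0;
};

// Concrete holder for a value of type T.
template <typename T>
struct Holder : Placeholder {
  explicit Holder(T value) : value(std::move(value)) {}
  Placeholder* clone() const override { return new Holder<T>(value); }
  T value;
};

// content_ would then be declared along the lines of:
// std::unique_ptr<Placeholder> content_;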