class ConvSeluSVD(nn.Module):
    """Layer holding a learnable bank of 1x3 coefficient rows, one per
    (output-channel, input-channel) pair.

    Only the constructor is visible here; the parameter tensor has shape
    ``(outputSize * inputSize, 1, 3)`` and is initialised from N(0, 0.02).

    NOTE(review): ``maxpool`` and ``ownBasis`` are accepted but unused in
    this constructor — presumably consumed by methods defined elsewhere in
    the original file; confirm before removing.
    """

    def __init__(self, inputSize, outputSize, stride=1, maxpool=False, ownBasis=False):
        super(ConvSeluSVD, self).__init__()
        self.inputSize = inputSize
        self.outputSize = outputSize
        self.stride = stride
        # One 1x3 row of coefficients per (output, input) channel pair.
        # (Fixed: stray " | |" extraction artifacts removed from every line;
        # the original text was not syntactically valid Python.)
        self.params = Parameter(torch.Tensor(outputSize * inputSize, 1, 3).normal_(0, .02))
### JHW 2018 | |
import numpy as np | |
import umap | |
# This code is from the excellent module at:
# https://stackoverflow.com/questions/4643647/fast-prime-factorization-module
import random |
""" | |
Create train, valid, test iterators for CIFAR-10 [1]. | |
Easily extended to MNIST, CIFAR-100 and Imagenet. | |
[1]: https://discuss.pytorch.org/t/feedback-on-pytorch-for-kaggle-competitions/2252/4 | |
""" | |
import torch | |
import numpy as np |
from lasagne.nonlinearities import * | |
from lasagne.layers import Layer | |
class SpatialSoftmaxLayer(Layer): | |
""" | |
Softmax layer that computes the softmax over pixels in the same location, | |
i.e., over the channel axis. This layer will automatically use the CuDNN | |
version of this softmax if it is available. | |
Parameters |
def sample_gumbel(shape, eps=1e-20):
    """Sample i.i.d. Gumbel(0, 1) noise of the given shape.

    Uses the inverse-CDF transform ``-log(-log(U))`` with
    ``U ~ Uniform(0, 1)``; ``eps`` guards both logarithms against log(0).
    (Fixed: stray " | |" extraction artifacts removed; the original text
    was not syntactically valid Python.)
    """
    U = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
    """Draw a differentiable sample from the Gumbel-Softmax distribution.

    Adds Gumbel(0, 1) noise to ``logits`` and applies a temperature-scaled
    softmax; lower temperatures push the result toward one-hot.
    (Fixed: stray " | |" extraction artifacts removed; the original text
    was not syntactically valid Python.)
    """
    y = logits + sample_gumbel(tf.shape(logits))
    return tf.nn.softmax(y / temperature)
There is a web-page where Adam Taylor lists 10 alternative FPGA development languages: http://www.eetimes.com/document.asp?doc_id=1329857 On some languages the OO-word was used ... I commented that the only thing I have seen so far from these languages is that they are (truly) class based but that I haven't seen any real example. Yet, as I did not study them to their deepest extent, having not enough time and too much other work ...
I already use class-based design for my MyHDL work, see my gist https://gist.github.com/josyb/afd84c9a06fdec77f2fd, but this is not OO as none of these classes have been subclassed.
Instead of doing some real work today (Sat Oct 22nd 2016), I decided to give OO in MyHDL a try. You can see the results in the next two files.
'''
Created on 29 Dec 2015
@author: Josy
'''
from __future__ import print_function | |
import random | |
import myhdl |
%{ | |
#include "brainfuck.tab.h" | |
#include <string.h> | |
#include <stdlib.h> | |
void error() | |
{ | |
fprintf(stdout, "Plik niepoprawny pod wzglêdem leksykalnym. Linia: %d\n", yylineno); | |
exit(1); | |
} |