Kris Singh (kris-singh)

View temp.cpp
#include <torch/torch.h>
#include <iostream>
#include <ATen/Parallel.h>
#include <ATen/ATen.h>
using namespace at;
// using namespace torch;
void submodular_select(Tensor candidate_points, Tensor features_done, Tensor features)
{
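The preview cuts off at the opening brace, so the intended body is not shown. Below is a minimal Python sketch of one common reading of this signature, greedy facility-location selection over feature similarities; the helper name, the cosine similarity measure, and the k parameter are assumptions, not the gist's actual logic.

import torch
import torch.nn.functional as F

def submodular_select_sketch(candidate_points, features_done, features, k=1):
    # Greedy facility-location selection: repeatedly add the candidate that
    # most improves how well every point is "covered" by the selected set,
    # where coverage is the best cosine similarity to any selected point.
    feats = F.normalize(features, dim=1)                     # (N, d)
    sim = feats @ feats[candidate_points].t()                # (N, C)
    if features_done.numel() > 0:
        done = F.normalize(features_done, dim=1)
        covered = (feats @ done.t()).max(dim=1).values       # (N,)
    else:
        covered = torch.zeros(features.size(0))
    remaining = list(range(len(candidate_points)))
    selected = []
    for _ in range(k):
        # marginal coverage gain of each remaining candidate
        gains = torch.clamp(sim[:, remaining] - covered.unsqueeze(1), min=0).sum(dim=0)
        best = int(torch.argmax(gains))
        j = remaining.pop(best)
        selected.append(int(candidate_points[j]))
        covered = torch.max(covered, sim[:, j])
    return selected

For example, submodular_select_sketch(torch.arange(10), torch.empty(0, 64), torch.randn(10, 64), k=3) greedily picks three of ten candidates with nothing selected beforehand.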
kris-singh / backward_grad.py
Created Jan 30, 2019
Crude implementation.
View backward_grad.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
def backward_hook_function(grad_out):
print(grad_out.shape)
print(grad_out.norm())
# print("grad_norm", grad_in.norm())
View csrc_cpu_soft_nms_cpu.cpp
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include "cpu/vision.h"
template <typename scalar_t>
at::Tensor soft_nms_cpu_kernel(const at::Tensor& dets,
at::Tensor& scores,
const float threshold) {
AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor");
AT_ASSERTM(!scores.type().is_cuda(), "scores must be a CPU tensor");
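The preview shows only the kernel's signature and its CPU assertions. As a reference for what soft-NMS computes, here is a hedged NumPy sketch of the linear-decay variant (Bodla et al., 2017); boxes are [x1, y1, x2, y2] rows, and the threshold defaults are illustrative, not taken from this file.

import numpy as np

def soft_nms_linear(dets, scores, iou_thresh=0.3, score_thresh=0.001):
    # Instead of discarding boxes that overlap the current best box,
    # soft-NMS decays their scores in proportion to the overlap.
    dets = dets.astype(np.float32)
    scores = scores.astype(np.float32).copy()
    idxs = np.arange(len(scores))
    keep = []
    while idxs.size > 0:
        top = idxs[np.argmax(scores[idxs])]
        keep.append(top)
        idxs = idxs[idxs != top]
        if idxs.size == 0:
            break
        # IoU of the top box with every remaining box
        x1 = np.maximum(dets[top, 0], dets[idxs, 0])
        y1 = np.maximum(dets[top, 1], dets[idxs, 1])
        x2 = np.minimum(dets[top, 2], dets[idxs, 2])
        y2 = np.minimum(dets[top, 3], dets[idxs, 3])
        inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
        area_top = (dets[top, 2] - dets[top, 0]) * (dets[top, 3] - dets[top, 1])
        area_rest = (dets[idxs, 2] - dets[idxs, 0]) * (dets[idxs, 3] - dets[idxs, 1])
        iou = inter / (area_top + area_rest - inter)
        # linear decay for boxes above the IoU threshold
        decay = np.where(iou > iou_thresh, 1.0 - iou, 1.0)
        scores[idxs] *= decay
        idxs = idxs[scores[idxs] > score_thresh]
    return keep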
View temp2.py
class SelectLoss:
"""
Selection based on Loss values of samples.
No need for rejection sampling.
"""
def __init__(self, X, Y, fwd_batch_size, batch_size, _, loss):
"""
:param loss: loss function
:param x_train: training data
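The class body is truncated after the docstring. A minimal sketch of the strategy the docstring describes, scoring samples in forward-only chunks and keeping the highest-loss ones, might look like this; the model argument and function name are assumptions, not the gist's code.

import torch

def select_by_loss(model, loss_fn, X, Y, fwd_batch_size, batch_size):
    # Score every sample by its loss in forward-only chunks, then keep the
    # batch_size highest-loss samples; no rejection sampling is needed
    # because the selection is a deterministic top-k.
    losses = []
    model.eval()
    with torch.no_grad():
        for i in range(0, len(X), fwd_batch_size):
            out = model(X[i:i + fwd_batch_size])
            # per-sample losses, so loss_fn must use reduction='none'
            losses.append(loss_fn(out, Y[i:i + fwd_batch_size]))
    top = torch.topk(torch.cat(losses), batch_size).indices
    return X[top], Y[top]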
View temp.py
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import argparse
import keras
import gzip
import tensorflow as tf
import pickle
import pandas as pd
View normal_cifar.cpp
#include <mlpack/core.hpp>
#include <mlpack/core/optimizers/rmsprop/rmsprop.hpp>
#include <mlpack/methods/ann/init_rules/gaussian_init.hpp>
#include <mlpack/methods/ann/rbm/spike_slab_rbm_policy.hpp>
#include <mlpack/methods/ann/rbm.hpp>
#include <mlpack/methods/ann/rbm/binary_rbm_policy.hpp>
#include <mlpack/methods/softmax_regression/softmax_regression.hpp>
#include <mlpack/core/optimizers/minibatch_sgd/minibatch_sgd.hpp>
#include <mlpack/core/optimizers/sgd/sgd.hpp>
View cifar.cpp
#include <mlpack/core.hpp>
#include <mlpack/core/optimizers/rmsprop/rmsprop.hpp>
#include <mlpack/methods/ann/init_rules/gaussian_init.hpp>
#include <mlpack/methods/ann/rbm/spike_slab_rbm_policy.hpp>
#include <mlpack/methods/ann/rbm.hpp>
#include <mlpack/methods/ann/rbm/binary_rbm_policy.hpp>
#include <mlpack/methods/softmax_regression/softmax_regression.hpp>
#include <mlpack/core/optimizers/minibatch_sgd/minibatch_sgd.hpp>
#include <mlpack/core/optimizers/sgd/sgd.hpp>
View gan_conv.cpp
#include <mlpack/core.hpp>
#include <mlpack/core/util/cli.hpp>
#include <mlpack/methods/ann/init_rules/gaussian_init.hpp>
#include <mlpack/methods/ann/layer/layer_types.hpp>
#include <mlpack/methods/ann/layer/layer.hpp>
#include <mlpack/methods/ann/gan.hpp>
#include <mlpack/methods/softmax_regression/softmax_regression.hpp>
#include <mlpack/core/optimizers/minibatch_sgd/minibatch_sgd.hpp>
View final.md

Title: Deep Learning Modules

Date: 2017-08-26 22:05:00

Tags: gsoc, rbm, ssRBM, deep learning

Author: Kris Singh

Goals for the summer

