Minjie Wang (jermainewang)
import cugraph
import torch

def louvain(dgl_g):
    # Convert the DGL graph to a cuGraph graph; Louvain expects an undirected graph.
    cugraph_g = dgl_g.to_cugraph().to_undirected()
    df, _ = cugraph.louvain(cugraph_g, resolution=3)
    # revert the node ID renumbering by cugraph
    df = cugraph_g.unrenumber(df, 'vertex').sort_values('vertex')
    # One community ID per node, returned as a torch LongTensor.
    return torch.utils.dlpack.from_dlpack(df['partition'].to_dlpack()).long()
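A minimal usage sketch for the helper above, assuming a small DGL graph already moved to a CUDA device (cuGraph runs on the GPU); the toy edge list and the print statement are illustrative additions, not part of the gist.

import dgl
import torch

# Hypothetical toy graph; any DGL graph on a CUDA device should work the same way.
src = torch.tensor([0, 1, 2, 3, 4, 5])
dst = torch.tensor([1, 2, 0, 4, 5, 3])
g = dgl.graph((src, dst)).to('cuda')

partition = louvain(g)  # one community ID per node, as a torch.LongTensor
print(partition.shape, partition.unique())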
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
  <meta name="apple-mobile-web-app-capable" content="yes">
  <meta name="apple-mobile-web-app-status-bar-style" content="black">
@jermainewang
jermainewang / prepare_data.py
Last active July 10, 2019 00:55
AMLC hackathon scripts
import json
import re
import numpy as np
import scipy.sparse as sp
from sklearn.feature_extraction.text import CountVectorizer
top50 = [
'trees',
'buildapc',
'gaming',
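The preview cuts the subreddit list short. As a hedged sketch of how the imports above are typically combined into a bag-of-words matrix, with made-up posts and labels standing in for the actual hackathon data:

from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import scipy.sparse as sp

# Hypothetical stand-ins for the real post texts and their subreddit labels.
posts = ["planted a new oak today", "which GPU for 1440p?", "first PC build, advice welcome"]
labels = ["trees", "gaming", "buildapc"]

vectorizer = CountVectorizer(stop_words="english")
X = vectorizer.fit_transform(posts)              # sparse bag-of-words matrix, one row per post
y = np.array([top50.index(l) for l in labels])   # class index into the gist's subreddit list
print(X.shape, sp.issparse(X), y)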
@jermainewang
jermainewang / lstm.py
Last active June 11, 2017 02:11
Naive LSTM implementation
import mxnet as mx
import mxnet.ndarray as nd
#import minpy.ndarray as nd

context = mx.gpu(0)

def linear(X, W, bias):
    # Fully connected layer: X @ W + bias
    return nd.dot(X, W) + bias

def sigmoid(x):
    # Sigmoid expressed through tanh: 0.5 * (tanh(x / 2) + 1)
    return .5 * (nd.tanh(.5 * x) + 1)
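Since the preview stops before the LSTM itself, here is a hedged sketch of how a single LSTM step could be written on top of the two helpers above; the parameter names, per-gate weight layout, and shapes are assumptions, not the gist's actual code.

def lstm_step(x, h, c, p):
    # p: dict of NDArray parameters, one (Wx*, Wh*, b*) triple per gate.
    i = sigmoid(linear(x, p['Wxi'], p['bi']) + nd.dot(h, p['Whi']))  # input gate
    f = sigmoid(linear(x, p['Wxf'], p['bf']) + nd.dot(h, p['Whf']))  # forget gate
    o = sigmoid(linear(x, p['Wxo'], p['bo']) + nd.dot(h, p['Who']))  # output gate
    g = nd.tanh(linear(x, p['Wxg'], p['bg']) + nd.dot(h, p['Whg']))  # candidate cell
    c_new = f * c + i * g
    h_new = o * nd.tanh(c_new)
    return h_new, c_new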
@jermainewang
jermainewang / make_index.cc
Created February 28, 2017 22:31
Make index from record file.
#include <dmlc/io.h>
#include <dmlc/recordio.h>
#include <iostream>
#include <fstream>
using namespace std;
using namespace dmlc;
void print_usage() {
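The C++ file is cut off by the preview. Purely to illustrate the idea behind it (an index file records each record's byte offset so a reader can seek directly to any record), here is a hedged Python sketch over a hypothetical newline-delimited record file, not the dmlc RecordIO binary format:

def make_index(record_path, index_path):
    # Write "record_number <TAB> byte_offset" lines for a newline-delimited record file.
    with open(record_path, "rb") as rec, open(index_path, "w") as idx:
        offset = 0
        for i, line in enumerate(rec):
            idx.write("%d\t%d\n" % (i, offset))
            offset += len(line)

# Usage: make_index("data.rec", "data.idx"); a reader can then seek() straight to a record.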
@jermainewang
jermainewang / t1.py
Created November 2, 2016 14:46
Python function call overhead benchmarks
import numpy as np

def foo(x, y):
    return np.dot(x, y)

x = np.zeros((256, 512))
y = np.zeros((512, 512))

def example():
    for i in range(0, 100):
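The benchmark body is truncated in the preview. A hedged sketch of how the comparison is usually timed (timeit, the repetition count, and the printed fields are assumptions, not the gist's own loop):

import timeit

n = 100
direct  = timeit.timeit(lambda: np.dot(x, y), number=n)   # call np.dot directly
wrapped = timeit.timeit(lambda: foo(x, y), number=n)      # go through the Python wrapper
print("direct %.4fs  wrapped %.4fs  per-call overhead %.2e s"
      % (direct, wrapped, (wrapped - direct) / n))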
@jermainewang
jermainewang / pg-pong.py
Created July 6, 2016 05:43 — forked from karpathy/pg-pong.py
Training a Neural Network ATARI Pong agent with Policy Gradients from raw pixels
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import cPickle as pickle
import gym
# hyperparameters
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
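To make the gamma hyperparameter concrete, here is a hedged sketch of the standard discounted-return helper used in policy-gradient training; the function name and the Pong-specific reset at nonzero rewards describe common practice, not necessarily the exact code hidden below the preview.

def discount_rewards(r):
    # Convert per-step rewards into discounted returns, working backwards in time.
    discounted = np.zeros_like(r)
    running = 0.0
    for t in reversed(range(len(r))):
        if r[t] != 0:
            running = 0.0  # a point was scored: reset the return (Pong-specific convention)
        running = running * gamma + r[t]
        discounted[t] = running
    return discounted

# e.g. discount_rewards(np.array([0., 0., 1.])) -> [0.9801, 0.99, 1.]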
name: "VGG16_RPN_TEST"
input: "data"
input_shape {
dim: 1
dim: 3
dim: 224
dim: 224
}
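The fragment above is the input declaration of a Caffe deploy prototxt: a single blob named "data" with shape 1x3x224x224 (batch, channels, height, width). A hedged pycaffe sketch of running it, with hypothetical file names and a random stand-in image:

import numpy as np
import caffe

caffe.set_mode_gpu()
# Hypothetical paths; the full network definition and weights are not shown above.
net = caffe.Net("vgg16_rpn_test.prototxt", "vgg16_rpn.caffemodel", caffe.TEST)

img = np.random.rand(3, 224, 224).astype(np.float32)  # stand-in for a preprocessed image
net.blobs["data"].data[0, ...] = img
out = net.forward()
print({name: blob.shape for name, blob in out.items()})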
// Operator classes
class BasicOperator;

template<int num_input, int num_output>
class OperatorTempl : public BasicOperator;

// A user code example
class SomeOperator : public OperatorTempl<1, 2> {
 public:
  SomeOperator( ... ) { }  // any constructor
@jermainewang
jermainewang / decl_v1.cpp
Created April 25, 2015 16:03
Draft operator declaration
DeclareOperator(Name) {
  DeclareInput_1(sh) {
    return sh.size() == 4;
  }
  DeclareInput_2(sh) {
    return ...
  }
  DeclareOutput_1 = {24, 24, input1.shape(0)}
  DeclareOutput_2 = {1, 1, input1.shape(3), 1}
  DeclareKernel(InData* input1, InData* input2, OutData* output1, OutData* output2, Context& ctx) {