Created June 7, 2019 14:37
Save sbalnojan/fdc7ec803b45d028f6e71709064fec78 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Minimal graph-CNN data-setup demo.

Puts the keras-deep-graph-learning submodule on sys.path, then builds a
toy 3-node graph: a weighted adjacency matrix A, a node feature matrix X,
and one-hot node labels Y — the inputs a GraphCNN layer would consume.
"""
import os
import sys

# Make the keras-deep-graph-learning submodule (and its examples) importable.
sys.path.append(os.path.join(os.getcwd(), "keras-deep-graph-learning"))
sys.path.append(os.path.join(os.getcwd(), "keras-deep-graph-learning/examples"))

import numpy as np
from keras.layers import Dense, Activation, Dropout
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras.optimizers import Adam
from keras_dgl.layers import GraphCNN
import keras.backend as K
from keras.utils import to_categorical

print("Creating our simple sample data...")

# Weighted adjacency matrix of a 3-node undirected graph: node 0 links to
# node 1 (weight 1) and to node 2 (weight 5); nodes 1 and 2 are unconnected.
A = np.array([[0, 1, 5], [1, 0, 0], [5, 0, 0]])
print(A)

# Node feature matrix: one row of features per node, whatever we have there...
X = np.array([[1, 2, 10], [4, 3, 10], [0, 2, 11]])

# Notice, if we set A = identity matrix, then we'd effectively assume no edges
# and just do a basic MLP on the features.
# We could do the same by setting the graph_conv_filter below to Id.
# We could also set X to Id, and thus effectively assume no features, and in
# this way do an "edge" embedding, so effectively try to understand what's
# connected to what. We could then use that as a feature in any way we like...

# Per-node class labels, converted to one-hot (categorical) form.
Y_o_dim = np.array([1, 2, 1])
Y = to_categorical(Y_o_dim)  # labels, whatever we wanna classify things into.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.