@cawfree
Created March 18, 2017 00:19
model null? No: the debugger dump below shows a live MultiLayerNetwork instance. The fields that read null (gradient, input, labels, solver) are normal for a network that has been init()'d but never fit(), since DL4J creates the solver and populates input, labels, and gradient lazily during training.
model = {MultiLayerNetwork@6104}
defaultConfiguration = {NeuralNetConfiguration@6111} "NeuralNetConfiguration(layer=DenseLayer(super=FeedForwardLayer(super=Layer(layerName=layer0, activationFn=tanh, weightInit=XAVIER, biasInit=0.0, dist=null, learningRate=0.1, biasLearningRate=0.1, learningRateSchedule=null, momentum=0.9, momentumSchedule={}, l1=0.0, l2=0.0, l1Bias=0.0, l2Bias=0.0, dropOut=0.0, updater=NESTEROVS, rho=NaN, epsilon=NaN, rmsDecay=NaN, adamMeanDecay=NaN, adamVarDecay=NaN, gradientNormalization=None, gradientNormalizationThreshold=1.0), nIn=4, nOut=3)), leakyreluAlpha=0.0, miniBatch=true, numIterations=1, maxNumLineSearchIterations=5, seed=1489796271540, optimizationAlgo=STOCHASTIC_GRADIENT_DESCENT, variables=[0_W, 0_b, 1_W, 1_b], stepFunction=null, useRegularization=false, useDropConnect=false, minimize=true, learningRateByParam={}, l1ByParam={}, l2ByParam={}, learningRatePolicy=None, lrPolicyDecayRate=NaN, lrPolicySteps=NaN, lrPolicyPower=NaN, pretrain=false, iterationCount=0)"
epsilon = null
flattenedGradients = null
flattenedParams = {NDArray@6112} "[0.65, -0.64, 0.11, 0.58, -0.09, 0.89, 0.17, 0.69, 0.30, 0.09, 0.15, 0.27, 0.00, 0.00, 0.00, -0.48, -1.51, 0.01, 0.82, -0.60, -0.35, 0.53, -0.32, -0.18, 0.00, 0.00, 0.00]"
gradient = null
initCalled = true
initDone = false
input = null
labels = null
layerIndex = 0
layerMap = {LinkedHashMap@6113} size = 2
layerWiseConfigurations = {MultiLayerConfiguration@6114} "{
  "backprop" : true,
  "backpropType" : "Standard",
  "confs" : [ {
    "iterationCount" : 0,
    "l1ByParam" : {
      "b" : 0.0,
      "W" : 0.0
    },
    "l2ByParam" : {
      "b" : 0.0,
      "W" : 0.0
    },
    "layer" : {
      "dense" : {
        "activationFn" : {
          "TanH" : { }
        },
        "adamMeanDecay" : "NaN",
        "adamVarDecay" : "NaN",
        "biasInit" : 0.0,
        "biasLearningRate" : 0.1,
        "dist" : null,
        "dropOut" : 0.0,
        "epsilon" : "NaN",
        "gradientNormalization" : "None",
        "gradientNormalizationThreshold" : 1.0,
        "l1" : 0.0,
        "l1Bias" : 0.0,
        "l2" : 0.0,
        "l2Bias" : 0.0,
        "layerName" : "layer0",
        "learningRate" : 0.1,
        "learningRateSchedule" : null,
        "momentum" : 0.9,
        "momentumSchedule" : { },
        "nin" : 4,
        "nout" : 3,
        "rho" : "NaN",
        "rmsDecay" : "NaN",
        "updater" : "NESTEROVS",
        "weightInit" : "XAV
layers = {Layer[2]@6115}
listeners = {ArrayList@6117} size = 0
mask = null
score = 0.0
solver = null
trainingListeners = {ArrayList@6118} size = 0
shadow$_klass_ = {Class@4623} "class org.deeplearning4j.nn.multilayer.MultiLayerNetwork"
shadow$_monitor_ = -2073807350
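
For reference, here is a minimal sketch of the builder code that would produce a configuration matching this dump, using the DL4J 0.8.x-era API. Layer 0 is fully described above: a DenseLayer with nIn=4, nOut=3, tanh activation, XAVIER weight init, and a NESTEROVS updater (momentum 0.9) at learning rate 0.1. The second layer is only implied: layers has length 2, variables lists 1_W and 1_b, and flattenedParams holds 27 values, i.e. (4x3 + 3) for layer 0 plus (3x3 + 3) for layer 1, so layer 1 maps 3 inputs to 3 outputs. Treating it as a softmax OutputLayer with negative log-likelihood loss (the classic 4-feature, 3-class setup) is an assumption, as is the seed: the dump's seed 1489796271540 looks like a System.currentTimeMillis() value.

import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ModelNullCheck {
    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(System.currentTimeMillis())  // dump shows seed=1489796271540
                .iterations(1)                     // numIterations=1
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(0.1)                 // learningRate=0.1, biasLearningRate=0.1
                .updater(Updater.NESTEROVS).momentum(0.9)
                .weightInit(WeightInit.XAVIER)
                .activation(Activation.TANH)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
                // Layer 1 is assumed: the dump only shows 1_W/1_b with shapes 3x3 and 3.
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .activation(Activation.SOFTMAX).nIn(3).nOut(3).build())
                .backprop(true).pretrain(false)    // backprop=true, pretrain=false in the dump
                .build();

        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();  // params are allocated here; gradient/input/labels/solver stay null until fit()

        System.out.println("model null? " + (model == null));  // false
        System.out.println("params: " + model.numParams());    // 27, matching flattenedParams
    }
}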