Skip to content

Instantly share code, notes, and snippets.

@mckunkel
Last active September 25, 2018 09:47
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save mckunkel/06fcc34ec5455f19ac56150f6d4cbc3b to your computer and use it in GitHub Desktop.
Complete Stacktrace for Yolo attempt
package faultrecordreader;
import static org.nd4j.linalg.indexing.NDArrayIndex.all;
import static org.nd4j.linalg.indexing.NDArrayIndex.point;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.datavec.api.conf.Configuration;
import org.datavec.api.records.Record;
import org.datavec.api.records.listener.RecordListener;
import org.datavec.api.records.metadata.RecordMetaData;
import org.datavec.api.records.metadata.RecordMetaDataURI;
import org.datavec.api.records.reader.RecordReader;
import org.datavec.api.split.InputSplit;
import org.datavec.api.writable.Writable;
import org.datavec.api.writable.batch.NDArrayRecordBatch;
import org.datavec.image.util.ImageUtils;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.INDArrayIndex;
import faults.Fault;
import faults.FaultFactory;
import faults.FaultNames;
/**
* A fault record reader for object detection.
* <p>
* Format of returned values: 4d array, with dimensions [minibatch, 4+C, h, w]
* Where the image is quantized into h x w grid locations.
* <p>
* Note that this is a modified version of Alex Black's
* ObjectDetectionRecordReader.java
*
* @author Michael C. Kunkel
*/
public class FaultObjectDetectionRecordReader implements RecordReader {

    /** Grid/quantization size (along width dimension) - X axis. */
    private final int gridW;
    /** Grid/quantization size (along height dimension) - Y axis. */
    private final int gridH;
    private int height;
    private int width;
    private int channels;
    /** Sorted label names; the one-hot class index is this list's index. */
    protected List<String> labels;
    /** On-the-fly data generator; rebuilt from the stored args on reset(). */
    protected FaultFactory factory = null;
    private int label; // NOTE(review): appears unused — candidate for removal
    // Args for the FaultFactory constructor, retained so reset() can rebuild it.
    private int superLayer;
    private int maxFaults;
    private FaultNames desiredFault;
    private boolean singleFaultGeneration;
    private boolean blurredFaults;

    /**
     * @param superLayer
     *            superlayer passed through to the {@link FaultFactory}
     * @param maxFaults
     *            maximum number of faults per generated example
     * @param desiredFault
     *            the fault type to generate
     * @param singleFaultGeneration
     *            whether to generate a single fault per example
     * @param blurredFaults
     *            whether generated faults are blurred
     * @param height
     *            Height of the output images
     * @param width
     *            Width of the output images
     * @param channels
     *            Number of channels for the output images
     * @param gridH
     *            Grid/quantization size (along height dimension) - Y axis
     * @param gridW
     *            Grid/quantization size (along width dimension) - X axis
     */
    public FaultObjectDetectionRecordReader(int superLayer, int maxFaults, FaultNames desiredFault,
            boolean singleFaultGeneration, boolean blurredFaults, int height, int width, int channels, int gridH,
            int gridW) {
        this.superLayer = superLayer;
        this.maxFaults = maxFaults;
        this.desiredFault = desiredFault;
        this.singleFaultGeneration = singleFaultGeneration;
        this.blurredFaults = blurredFaults;
        this.factory = new FaultFactory(superLayer, maxFaults, desiredFault, singleFaultGeneration, blurredFaults);
        this.height = height;
        this.width = width;
        this.channels = channels;
        this.gridW = gridW;
        this.gridH = gridH;
        initialize();
    }

    /**
     * Builds the sorted label list from ALL known fault names, so the one-hot
     * representation is consistent regardless of what this reader generates.
     */
    private void initialize() {
        Set<String> labelSet = new HashSet<>();
        for (FaultNames d : FaultNames.values()) {
            labelSet.add(d.getSaveName());
        }
        labels = new ArrayList<>(labelSet);
        // Ensure consistent order for label assignment (irrespective of
        // iteration order) by sorting the list of labels.
        Collections.sort(labels);
    }

    @Override
    public boolean batchesSupported() {
        // Batch mode reduces memory pressure during training.
        return true;
    }

    @Override
    public boolean hasNext() {
        // Data is generated on the fly, so there is always a next example.
        return true;
    }

    @Override
    public List<Writable> next() {
        return next(1).get(0);
    }

    @Override
    public void reset() {
        // Rebuild the factory from the originally supplied arguments.
        this.factory = new FaultFactory(this.superLayer, this.maxFaults, this.desiredFault, this.singleFaultGeneration,
                this.blurredFaults);
    }

    @Override
    public boolean resetSupported() {
        return true;
    }

    /**
     * Generates {@code num} examples and returns them as a single
     * {@link NDArrayRecordBatch} of [images, labels], where images is
     * [minibatch, channels, height, width] and labels is
     * [minibatch, 4+C, gridH, gridW].
     */
    @Override
    public List<List<Writable>> next(int num) {
        List<INDArray> faultData = new ArrayList<>(num);
        List<List<Fault>> objects = new ArrayList<>(num);
        for (int i = 0; i < num && hasNext(); i++) {
            faultData.add(factory.getFeatureVectorAsMatrix());
            objects.add(factory.getFaultList());
        }
        int nClasses = labels.size();
        INDArray outImg = Nd4j.create(faultData.size(), channels, height, width);
        INDArray outLabel = Nd4j.create(faultData.size(), 4 + nClasses, gridH, gridW);
        int exampleNum = 0;
        for (int i = 0; i < faultData.size(); i++) {
            INDArray imageFile = faultData.get(i);
            outImg.put(new INDArrayIndex[] { point(exampleNum), all(), all(), all() }, imageFile);
            List<Fault> objectsThisImg = objects.get(exampleNum);
            label(imageFile, objectsThisImg, outLabel, exampleNum);
            exampleNum++;
        }
        reset();
        return new NDArrayRecordBatch(Arrays.asList(outImg, outLabel));
    }

    /**
     * Writes the YOLO-style label tensor (TL/BR grid coordinates plus one-hot
     * class) for every fault in one example.
     */
    private void label(INDArray image, List<Fault> objectsThisImg, INDArray outLabel, int exampleNum) {
        int oW = image.columns(); // should be 6
        int oH = image.rows(); // should be 112
        int W = oW;
        int H = oH;
        for (Fault io : objectsThisImg) {
            /**
             * Nuance: fault locations are in natural CLAS x->wires, y->layers
             * coordinates, but the feature data is columns->x->layers,
             * rows->y->wires, so we SWITCH XCenter <-> YCenter,
             * XMin <-> YMin, XMax <-> YMax.
             */
            double cx = io.getFaultCoordinates().getYCenterPixels();
            double cy = io.getFaultCoordinates().getXCenterPixels();
            double[] cxyPostScaling = ImageUtils.translateCoordsScaleImage(cx, cy, W, H, width, height);
            double[] tlPost = ImageUtils.translateCoordsScaleImage(io.getFaultCoordinates().getyMin(),
                    io.getFaultCoordinates().getxMin(), W, H, width, height);
            double[] brPost = ImageUtils.translateCoordsScaleImage(io.getFaultCoordinates().getyMax(),
                    io.getFaultCoordinates().getxMax(), W, H, width, height);
            // BUG FIX: a fault centered exactly on the image edge yields
            // imgGridX == gridW (or imgGridY == gridH), which is out of range
            // for the [minibatch, 4+C, gridH, gridW] label array — this is the
            // "cannot get [0,0,56,0] from a [1, 18, 56, 3] NDArray" failure.
            // Clamp to the last valid grid cell.
            int imgGridX = Math.min(gridW - 1, (int) (cxyPostScaling[0] / width * gridW));
            int imgGridY = Math.min(gridH - 1, (int) (cxyPostScaling[1] / height * gridH));
            // Convert pixels to grid position, for TL and BR X/Y
            tlPost[0] = tlPost[0] / width * gridW;
            tlPost[1] = tlPost[1] / height * gridH;
            brPost[0] = brPost[0] / width * gridW;
            brPost[1] = brPost[1] / height * gridH;
            // Put TL, BR into label array:
            outLabel.putScalar(exampleNum, 0, imgGridY, imgGridX, tlPost[0]);
            outLabel.putScalar(exampleNum, 1, imgGridY, imgGridX, tlPost[1]);
            outLabel.putScalar(exampleNum, 2, imgGridY, imgGridX, brPost[0]);
            outLabel.putScalar(exampleNum, 3, imgGridY, imgGridX, brPost[1]);
            // Put label class into label array: (one-hot representation)
            int labelIdx = labels.indexOf(io.getSubFaultName().getSaveName());
            outLabel.putScalar(exampleNum, 4 + labelIdx, imgGridY, imgGridX, 1.0);
        }
    }

    @Override
    public Record nextRecord() {
        List<Writable> list = next();
        // FIX: the URI was previously created but never used (metadata got a
        // null URI); attach it to the record metadata.
        URI uri = URI.create("FaultFinderAI");
        return new org.datavec.api.records.impl.Record(list,
                new RecordMetaDataURI(uri, FaultObjectDetectionRecordReader.class));
    }

    // The methods below are not needed for generated data, but are required by
    // the RecordReader interface.

    @Override
    public void close() throws IOException {
        // Nothing to close: data is generated, not read from a resource.
    }

    @Override
    public Configuration getConf() {
        return null;
    }

    @Override
    public void setConf(Configuration arg0) {
        // No configuration used.
    }

    @Override
    public List<String> getLabels() {
        // FIX: previously returned null even though the sorted label list is
        // built in initialize(); return it so callers can map class indices.
        return labels;
    }

    @Override
    public List<RecordListener> getListeners() {
        return null;
    }

    @Override
    public void initialize(InputSplit arg0) throws IOException, InterruptedException {
        // Not applicable: no input split for generated data.
    }

    @Override
    public void initialize(Configuration arg0, InputSplit arg1) throws IOException, InterruptedException {
        // Not applicable: no input split for generated data.
    }

    @Override
    public Record loadFromMetaData(RecordMetaData arg0) throws IOException {
        return null;
    }

    @Override
    public List<Record> loadFromMetaData(List<RecordMetaData> arg0) throws IOException {
        return null;
    }

    @Override
    public List<Writable> record(URI arg0, DataInputStream arg1) throws IOException {
        return null;
    }

    @Override
    public void setListeners(RecordListener... arg0) {
        // Listeners not supported.
    }

    @Override
    public void setListeners(Collection<RecordListener> arg0) {
        // Listeners not supported.
    }
}
/**
 * Builds and initializes a small YOLO2-style computation graph for fault
 * detection on 112 x 6 single-channel images.
 * <p>
 * The network output grid is 56 x 3 (one 2x2 max-pool), with
 * {@code nBoxes * (5 + numClasses)} channels per grid cell.
 *
 * @param numLabels
 *            number of object classes the network should predict
 * @return the initialized {@link ComputationGraph}
 */
public static ComputationGraph computationGraphModelYolo(int numLabels) {
    int nBoxes = 5;
    // FIX: use the numLabels parameter instead of a hard-coded 14 — the
    // parameter was previously ignored, so the output layer size could not
    // follow the caller's label count.
    int numClasses = numLabels;
    double[][] priorBoxes = { { 1.08, 1.19 }, { 3.42, 4.41 }, { 6.63, 11.38 }, { 9.42, 5.11 }, { 16.62, 10.52 } };
    INDArray priors = Nd4j.create(priorBoxes);
    double lambdaNoObj = 0.5;
    double lambdaCoord = 1.0;
    GraphBuilder graphBuilder = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Adam()).graphBuilder()
            .addInputs("input").setInputTypes(InputType.convolutional(112, 6, 1))
            .addLayer("cnn1",
                    new ConvolutionLayer.Builder(3, 2).nIn(1).stride(1, 1).convolutionMode(ConvolutionMode.Same)
                            .nOut(40).activation(new ActivationReLU()).build(),
                    "input")
            .addLayer("cnn2",
                    new ConvolutionLayer.Builder(2, 2).nIn(40).stride(1, 1).convolutionMode(ConvolutionMode.Same)
                            .nOut(30).activation(new ActivationReLU()).build(),
                    "cnn1")
            .addLayer("pool1",
                    new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2).stride(2, 2)
                            .build(),
                    "cnn2")
            .addLayer("cnn3",
                    new ConvolutionLayer.Builder(2, 2).nIn(30).stride(1, 1).convolutionMode(ConvolutionMode.Same)
                            .nOut(20).activation(new ActivationReLU()).build(),
                    "pool1")
            // FIX: weightInit(XAVIER) was immediately overridden by
            // weightInit(RELU) in the same builder chain; keep only the
            // effective value.
            .addLayer("cnn4",
                    new ConvolutionLayer.Builder(1, 1).nIn(20).nOut(nBoxes * (5 + numClasses)).stride(1, 1)
                            .convolutionMode(ConvolutionMode.Same).weightInit(WeightInit.RELU)
                            .activation(Activation.IDENTITY).build(),
                    "cnn3")
            .addLayer("outputs",
                    // NOTE: lambbaNoObj (sic) is the actual method name in the
                    // DL4J 1.0.0-beta Yolo2OutputLayer.Builder API.
                    new Yolo2OutputLayer.Builder().lambbaNoObj(lambdaNoObj).lambdaCoord(lambdaCoord)
                            .boundingBoxPriors(priors).build(),
                    "cnn4")
            .setOutputs("outputs").backprop(true).pretrain(false);
    ComputationGraph neuralNetwork = new ComputationGraph(graphBuilder.build());
    // initialize the network
    neuralNetwork.init();
    System.out.println(neuralNetwork.summary(InputType.convolutional(112, 6, 1)));
    return neuralNetwork;
}
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/Users/Mike/.m2/repository/org/slf4j/slf4j-simple/1.7.25/slf4j-simple-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/Users/Mike/.m2/repository/ch/qos/logback/logback-classic/1.1.3/logback-classic-1.1.3.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/Users/Mike/.m2/repository/org/jlab/coat/coat-libs/5.1-SNAPSHOT/coat-libs-5.1-SNAPSHOT.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.SimpleLoggerFactory]
[main] INFO org.nd4j.linalg.factory.Nd4jBackend - Loaded [CpuBackend] backend
[main] WARN org.nd4j.versioncheck.VersionCheck - *** ND4J VERSION CHECK FAILED - INCOMPATIBLE VERSIONS FOUND ***
[main] WARN org.nd4j.versioncheck.VersionCheck - Incompatible versions (different version number) of DL4J, ND4J, RL4J, DataVec, Arbiter are unlikely to function correctly
[main] INFO org.nd4j.versioncheck.VersionCheck - Versions of artifacts found on classpath:
[main] INFO org.nd4j.versioncheck.VersionCheck - org.datavec : datavec-api : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.datavec : datavec-data-codec : 0.9.1
[main] INFO org.nd4j.versioncheck.VersionCheck - org.datavec : datavec-data-image : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-core : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-datasets : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-datavec-iterators : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-modelimport : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-nlp : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-nn : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-play_2.11 : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-tsne : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-ui-components : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-ui-model : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-ui-resources : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-ui_2.11 : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-util : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-utility-iterators : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : deeplearning4j-zoo : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.deeplearning4j : nearestneighbor-core : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : jackson : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-api : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-base64 : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-buffer : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-common : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-context : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-jackson : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-native-api : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-native-platform : 1.0.0-beta
[main] INFO org.nd4j.versioncheck.VersionCheck - org.nd4j : nd4j-native : 1.0.0-beta
[main] INFO org.nd4j.nativeblas.NativeOpsHolder - Number of threads used for NativeOps: 2
[main] INFO org.nd4j.nativeblas.Nd4jBlas - Number of threads used for BLAS: 2
[main] INFO org.nd4j.linalg.api.ops.executioner.DefaultOpExecutioner - Backend used: [CPU]; OS: [Mac OS X]
[main] INFO org.nd4j.linalg.api.ops.executioner.DefaultOpExecutioner - Cores: [4]; Memory: [1.8GB];
[main] INFO org.nd4j.linalg.api.ops.executioner.DefaultOpExecutioner - Blas vendor: [OPENBLAS]
[main] INFO org.deeplearning4j.nn.graph.ComputationGraph - Starting ComputationGraph with WorkspaceModes set to [training: ENABLED; inference: ENABLED], cacheMode set to [NONE]
==========================================================================================================================================================================================================================================================
VertexName (VertexType) nIn,nOut TotalParams ParamsShape Vertex Inputs InputShape OutputShape
==========================================================================================================================================================================================================================================================
input (InputVertex) -,- - - - - -
cnn1 (ConvolutionLayer) 1,40 280 W:{40,1,3,2}, b:{1,40} [input] InputTypeConvolutional(h=112,w=6,c=1) InputTypeConvolutional(h=112,w=6,c=40)
cnn2 (ConvolutionLayer) 40,30 4830 W:{30,40,2,2}, b:{1,30} [cnn1] InputTypeConvolutional(h=112,w=6,c=40) InputTypeConvolutional(h=112,w=6,c=30)
pool1 (SubsamplingLayer) -,- 0 - [cnn2] InputTypeConvolutional(h=112,w=6,c=30) InputTypeConvolutional(h=56,w=3,c=30)
cnn3 (ConvolutionLayer) 30,20 2420 W:{20,30,2,2}, b:{1,20} [pool1] InputTypeConvolutional(h=56,w=3,c=30) InputTypeConvolutional(h=56,w=3,c=20)
cnn4 (ConvolutionLayer) 20,95 1995 W:{95,20,1,1}, b:{1,95} [cnn3] InputTypeConvolutional(h=56,w=3,c=20) InputTypeConvolutional(h=56,w=3,c=95)
outputs (Yolo2OutputLayer) -,- 0 - [cnn4] InputTypeConvolutional(h=56,w=3,c=95) InputTypeConvolutional(h=56,w=3,c=95)
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Total Parameters: 9525
Trainable Parameters: 9525
Frozen Parameters: 0
==========================================================================================================================================================================================================================================================
[main] INFO play.api.Play - Application started (Prod)
[main] INFO play.core.server.NettyServer - Listening for HTTP on /0:0:0:0:0:0:0:0:9000
[main] INFO org.deeplearning4j.ui.play.PlayUIServer - DL4J UI Server started at http://localhost:9000
[main] INFO org.deeplearning4j.ui.play.PlayUIServer - StatsStorage instance attached to UI: InMemoryStatsStorage(uid=b9d9382e)
[main] INFO org.deeplearning4j.optimize.listeners.ScoreIterationListener - Score at iteration 0 is 109.92279276251793
Exception in thread "AMDSI prefetch thread" java.lang.RuntimeException: java.lang.IllegalArgumentException: Invalid indices: cannot get [0,0,56,0] from a [1, 18, 56, 3] NDArray
at org.deeplearning4j.datasets.iterator.AsyncMultiDataSetIterator$AsyncPrefetchThread.run(AsyncMultiDataSetIterator.java:374)
Caused by: java.lang.IllegalArgumentException: Invalid indices: cannot get [0,0,56,0] from a [1, 18, 56, 3] NDArray
at org.nd4j.linalg.api.shape.Shape.getOffsetUnsafe(Shape.java:946)
at org.nd4j.linalg.api.ndarray.BaseNDArray.putScalar(BaseNDArray.java:1426)
at faultrecordreader.FaultObjectDetectionRecordReader.label(FaultObjectDetectionRecordReader.java:216)
at faultrecordreader.FaultObjectDetectionRecordReader.next(FaultObjectDetectionRecordReader.java:171)
at org.deeplearning4j.datasets.datavec.RecordReaderMultiDataSetIterator.next(RecordReaderMultiDataSetIterator.java:144)
at org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator.next(RecordReaderDataSetIterator.java:366)
at org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator.next(RecordReaderDataSetIterator.java:457)
at org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator.next(RecordReaderDataSetIterator.java:86)
at org.deeplearning4j.datasets.iterator.impl.MultiDataSetIteratorAdapter.next(MultiDataSetIteratorAdapter.java:62)
at org.deeplearning4j.datasets.iterator.impl.MultiDataSetIteratorAdapter.next(MultiDataSetIteratorAdapter.java:13)
at org.deeplearning4j.datasets.iterator.AsyncMultiDataSetIterator$AsyncPrefetchThread.run(AsyncMultiDataSetIterator.java:346)
Exception in thread "main" java.lang.IllegalArgumentException: Invalid indices: cannot get [0,0,56,0] from a [1, 18, 56, 3] NDArray
at org.nd4j.linalg.api.shape.Shape.getOffsetUnsafe(Shape.java:946)
at org.nd4j.linalg.api.ndarray.BaseNDArray.putScalar(BaseNDArray.java:1426)
at faultrecordreader.FaultObjectDetectionRecordReader.label(FaultObjectDetectionRecordReader.java:216)
at faultrecordreader.FaultObjectDetectionRecordReader.next(FaultObjectDetectionRecordReader.java:171)
at org.deeplearning4j.datasets.datavec.RecordReaderMultiDataSetIterator.next(RecordReaderMultiDataSetIterator.java:144)
at org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator.next(RecordReaderDataSetIterator.java:366)
at org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator.next(RecordReaderDataSetIterator.java:457)
at org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator.next(RecordReaderDataSetIterator.java:86)
at org.deeplearning4j.datasets.iterator.impl.MultiDataSetIteratorAdapter.next(MultiDataSetIteratorAdapter.java:62)
at org.deeplearning4j.datasets.iterator.impl.MultiDataSetIteratorAdapter.next(MultiDataSetIteratorAdapter.java:13)
at org.deeplearning4j.datasets.iterator.AsyncMultiDataSetIterator$AsyncPrefetchThread.run(AsyncMultiDataSetIterator.java:346)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment