gradle run
:compileJava UP-TO-DATE
:compileScala
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=512m; support was removed in 8.0
/Users/justin/Projects/dl4j-ethnicity-test/src/main/scala/ai/bernie/researchtests/CNN.scala:28: method list in class Builder is deprecated: see corresponding Javadoc for more information.
    .list(9)
     ^
one warning found
:processResources UP-TO-DATE
:classes
:run
Exception in thread "main" com.fasterxml.jackson.databind.JsonMappingException: Could not find creator property with name 'id' (in class org.apache.spark.rdd.RDDOperationScope)
 at [Source: {"id":"0","name":"parallelize"}; line: 1, column: 1]
    at com.fasterxml.jackson.databind.JsonMappingException.from(JsonMappingException.java:148)
    at com.fasterxml.jackson.databind.DeserializationContext.mappingException(DeserializationContext.java:843)
    at com.fasterxml.jackson.databind.deser.BeanDeserializerFactory.addBeanProps(BeanDeserializerFactory.java:533)
    at com.fasterxml.jackson.databind.deser.BeanDeserializerFactory.buildBeanDeserializer(BeanDeserializerFactory.java:220)
    at com.fasterxml.jackson.databind.deser.BeanDeserializerFactory.createBeanDeserializer(BeanDeserializerFactory.java:143)
    at com.fasterxml.jackson.databind.deser.DeserializerCache._createDeserializer2(DeserializerCache.java:405)
    at com.fasterxml.jackson.databind.deser.DeserializerCache._createDeserializer(DeserializerCache.java:354)
    at com.fasterxml.jackson.databind.deser.DeserializerCache._createAndCache2(DeserializerCache.java:262)
    at com.fasterxml.jackson.databind.deser.DeserializerCache._createAndCacheValueDeserializer(DeserializerCache.java:242)
    at com.fasterxml.jackson.databind.deser.DeserializerCache.findValueDeserializer(DeserializerCache.java:143)
    at com.fasterxml.jackson.databind.DeserializationContext.findRootValueDeserializer(DeserializationContext.java:439)
    at com.fasterxml.jackson.databind.ObjectMapper._findRootDeserializer(ObjectMapper.java:3666)
    at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:3558)
    at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2578)
    at org.apache.spark.rdd.RDDOperationScope$.fromJson(RDDOperationScope.scala:85)
    at org.apache.spark.rdd.RDD$$anonfun$34.apply(RDD.scala:1630)
    at org.apache.spark.rdd.RDD$$anonfun$34.apply(RDD.scala:1630)
    at scala.Option.map(Option.scala:146)
    at org.apache.spark.rdd.RDD.<init>(RDD.scala:1630)
    at org.apache.spark.rdd.ParallelCollectionRDD.<init>(ParallelCollectionRDD.scala:90)
    at org.apache.spark.SparkContext$$anonfun$parallelize$1.apply(SparkContext.scala:730)
    at org.apache.spark.SparkContext$$anonfun$parallelize$1.apply(SparkContext.scala:728)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
    at org.apache.spark.SparkContext.withScope(SparkContext.scala:714)
    at org.apache.spark.SparkContext.parallelize(SparkContext.scala:728)
    at ai.bernie.researchtests.TrainNet$.main(TrainNet.scala:78)
    at ai.bernie.researchtests.TrainNet.main(TrainNet.scala)
:run FAILED

FAILURE: Build failed with an exception.

* What went wrong:
Execution failed for task ':run'.
> Process 'command '/Library/Java/JavaVirtualMachines/jdk1.8.0_65.jdk/Contents/Home/bin/java'' finished with non-zero exit value 1
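
A note on the failure itself: this JsonMappingException is raised while Spark parses the RDD operation scope JSON, and it commonly indicates a mismatch between the jackson-databind and jackson-module-scala versions that end up on the runtime classpath (for example when a transitive dependency drags in a different Jackson than the one Spark expects). A throwaway check like the one below (hypothetical JacksonVersionCheck object, not part of this project; assumes it runs on the same classpath as the failing build) shows which jackson-databind actually gets loaded:

import com.fasterxml.jackson.databind.ObjectMapper

// Throwaway diagnostic: print the Jackson version and jar that win on the classpath.
object JacksonVersionCheck {
  def main(args: Array[String]): Unit = {
    // Version of jackson-databind actually loaded at runtime
    println("jackson-databind: " + com.fasterxml.jackson.databind.cfg.PackageVersion.VERSION)
    // Jar the ObjectMapper class was loaded from, useful for spotting duplicate/conflicting jars
    println("loaded from:      " + classOf[ObjectMapper].getProtectionDomain.getCodeSource.getLocation)
  }
}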
########
code used in main class up until error is triggered
########
object TrainNet {

  lazy val log: Logger = LoggerFactory.getLogger(TrainNet.getClass)

  def main(args: Array[String]) = {

    // create spark context
    val sparkConf = new SparkConf()
    sparkConf.setMaster("local[2]")
    sparkConf.setAppName("SPARKTEST")
    sparkConf.set(SparkDl4jMultiLayer.AVERAGE_EACH_ITERATION, String.valueOf(true))
    val sc = new SparkContext(sparkConf)

    // neural network parameters
    val imageWidth = 100
    val imageHeight = 100
    val nChannels = 1
    val outputNum = 8 // number of labels
    val numSamples = 9383 // LFWLoader.NUM_IMAGES
    val batchSize = 10
    val iterations = 5
    val splitTrainNum = (batchSize * .8).toInt
    val seed = 123
    val listenerFreq = iterations / 5
    val testInputBuilder = mutable.ArrayBuilder.make[INDArray]
    val testLabelsBuilder = mutable.ArrayBuilder.make[INDArray]

    // load datasets
    // training dataset (parallelized with Spark)
    log.info("Load training data...")
    val trainLabels = new java.util.ArrayList[String]()
    val trainRecordReader = new ImageRecordReader(imageWidth, imageHeight, nChannels, true, trainLabels)
    trainRecordReader.initialize(new FileSplit(new java.io.File("./cnn_dataset")))
    //val dataSetIterator: DataSetIterator = new RecordReaderDataSetIterator(recordReader, batchSize, imageWidth*imageHeight*nChannels, labels.size())
    val trainingSetIterator: DataSetIterator = new RecordReaderDataSetIterator(trainRecordReader, batchSize, -1, trainLabels.size())
    val list = new util.ArrayList[DataSet](numSamples)
    while (trainingSetIterator.hasNext) list.add(trainingSetIterator.next)
    val trainRDD = sc.parallelize(list)
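
Per the stack trace, the failure bottoms out in sc.parallelize (TrainNet.scala:78), inside Spark's RDDOperationScope.withScope / fromJson. A minimal sketch along these lines (hypothetical ParallelizeRepro object; assumes the same dependencies/classpath as the build above) can confirm whether any parallelize call fails the same way, independent of the record-reader pipeline:

import org.apache.spark.{SparkConf, SparkContext}

// Standalone check: does a bare parallelize call hit the same JsonMappingException?
object ParallelizeRepro {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("parallelize-repro"))
    // SparkContext.parallelize wraps its body in RDDOperationScope.withScope,
    // which is where the exception above is thrown.
    val rdd = sc.parallelize(Seq(1, 2, 3))
    println("count = " + rdd.count())
    sc.stop()
  }
}

If this also fails, the problem lives in the dependency graph rather than in the training code above.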