@vvpreetham · Last active August 13, 2016
Sample file showing that training a model with a large number of degrees of freedom works, but loading it back exhausts the heap. This is a scaled-down version of a larger corpus (training produces a file of roughly 4.5 GB).
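For scale, a rough heap estimate (a sketch under assumed numbers, not figures from the gist): with the 1000-dimensional vectors configured below, a few hundred thousand vocabulary entries, double-precision weights, and AdaGrad keeping a gradient-history matrix alongside the vectors, the in-memory model alone lands in the multi-gigabyte range:

// Back-of-envelope heap estimate for loading the full model back.
// Assumptions (not from the gist): ~500k vocabulary entries and an
// AdaGrad history matrix the same shape as the word-vector matrix.
object HeapEstimate {
  def main(args: Array[String]): Unit = {
    val vocab = 500000L // assumed vocabulary size
    val dims  = 1000L   // layerSize used in this gist
    val bytes = 8L      // bytes per double
    val syn0  = vocab * dims * bytes // word-vector matrix
    val total = syn0 * 2             // + AdaGrad history (assumption)
    println(f"~${total / math.pow(1024, 3)}%.1f GB before syn1/JVM overhead")
  }
}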
import org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer
import org.deeplearning4j.models.word2vec.{ VocabWord, Word2Vec }
import org.deeplearning4j.models.word2vec.wordstore.inmemory.InMemoryLookupCache
import org.deeplearning4j.text.sentenceiterator.BasicLineIterator
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory
import org.slf4j.LoggerFactory
object ProductCluster {

  lazy val log = LoggerFactory.getLogger(ProductCluster.getClass)

  def main(args: Array[String]): Unit = {
    noinit_unstemmed_adagrad_v1000_5f()
  }

  def noinit_unstemmed_adagrad_v1000_5f(): Unit = {
    println("Building Model for noinit_unstemmed_adagrad_v1000_5f...")
    val filePath = "./resources/unstemmed_product_desc.txt"
    val path2Save = "./resources/noinit_unstemmed_adagrad_v1000_5f.bin"
    println("Input  : " + filePath)
    println("Output : " + path2Save)

    // One sentence per line of the corpus file, lower-cased and stripped
    // of punctuation by the common preprocessor.
    val iter = new BasicLineIterator(filePath)
    val t = new DefaultTokenizerFactory()
    t.setTokenPreProcessor(new CommonPreprocessor())

    // 1000-dimensional vectors with AdaGrad updates; the cache and table
    // are shared with the Word2Vec builder below.
    val cache = new InMemoryLookupCache()
    val table = new InMemoryLookupTable.Builder[VocabWord]()
      .vectorLength(1000)
      .useAdaGrad(true)
      .cache(cache)
      .lr(0.025f)
      .build()

    println("Building model....")
    val word2Vec: Word2Vec = new Word2Vec.Builder()
      .minWordFrequency(5)
      .iterations(1)
      .epochs(2)
      .layerSize(1000)
      .seed(42)
      .windowSize(5)
      .iterate(iter)
      .tokenizerFactory(t)
      .lookupTable(table)
      .vocabCache(cache)
      .build()

    println("Fitting noinit_unstemmed_adagrad_v1000_5f model....")
    word2Vec.fit()
    println("Training finished...")

    // Serializes the full model (vectors plus training state); this is
    // the ~4.5 GB file that later fails to load.
    WordVectorSerializer.writeFullModel(word2Vec, path2Save)
    println("Saved to " + path2Save)
  }
}
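If only the vectors (not the training state) are needed at query time, one possible workaround is to also persist the lookup table in DL4J's lighter text format. This is a sketch, not part of the original gist, and assumes the writeWordVectors(Word2Vec, String) overload available in this DL4J version; txtPath is a hypothetical output path.

// Sketch: the text format omits training state (HS tree, AdaGrad
// histories), so the file is far smaller than writeFullModel's output.
val txtPath = "./resources/noinit_unstemmed_adagrad_v1000_5f.txt" // hypothetical path
WordVectorSerializer.writeWordVectors(word2Vec, txtPath)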
// ------ Second file -----
import scala.io.Source
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer
import org.deeplearning4j.models.word2vec.Word2Vec

object ContextMatch {

  def main(args: Array[String]): Unit = {
    val path2Save = "./resources/noinit_unstemmed_adagrad_v1000_5f.bin"

    // This is the call that exhausts the heap on the ~4.5 GB model file.
    val word2Vec: Word2Vec = WordVectorSerializer.loadFullModel(path2Save)

    val readMatch = "./resources/match.txt" // some file on your system, one query word per line
    val source = Source.fromFile(readMatch)
    try {
      for (line <- source.getLines) {
        val lst2 = word2Vec.wordsNearest(line, 10)
        println(s"Closest words to $line are : " + lst2)
      }
    } finally source.close()
  }
}
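On the read side, the matching sketch (again an assumption, not code from the gist) loads the text-format file through the WordVectors interface, which supports the same wordsNearest query without reconstructing the full training state:

import java.io.File
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors

object ContextMatchLight {
  def main(args: Array[String]): Unit = {
    // Sketch: assumes the loadTxtVectors(File) method present in this DL4J version.
    val txtPath = "./resources/noinit_unstemmed_adagrad_v1000_5f.txt" // hypothetical path
    val vectors: WordVectors = WordVectorSerializer.loadTxtVectors(new File(txtPath))
    println(vectors.wordsNearest("example", 10)) // "example" is a placeholder query word
  }
}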