Created
May 1, 2016 18:52
-
-
Save geoHeil/2feb74f303b0cd97cb7a42918efc90c3 to your computer and use it in GitHub Desktop.
spark-jobserver 2.11 exception
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
LOG_DIR empty; logging will go to /tmp/job-server | |
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=512m; support was removed in 8.0 | |
[2016-05-01 18:35:28,375] INFO spark.jobserver.JobServer$ [] [] - Starting JobServer with config { | |
# system properties | |
"app" : { | |
# system properties | |
"name" : "spark.jobserver.JobServer" | |
}, | |
# merge of /app/docker.conf: 39,application.conf: 101 | |
# universal context configuration. These settings can be overridden, see README.md | |
# Default settings for ad hoc as well as manually created contexts | |
# You can add any Spark config params here, for example, spark.mesos.coarse = true | |
"context-settings" : { | |
# application.conf: 107 | |
# A zero-arg class implementing spark.jobserver.context.SparkContextFactory | |
# Determines the type of jobs that can run in a SparkContext | |
"context-factory" : "spark.jobserver.context.DefaultSparkContextFactory", | |
# application.conf: 125 | |
# Timeout for SupervisorActor to wait for forked (separate JVM) contexts to initialize | |
"context-init-timeout" : "60s", | |
# /app/docker.conf: 41 | |
# Executor memory per node, -Xmx style e.g. 512m, #1G, etc. | |
"memory-per-node" : "512m", | |
# /app/docker.conf: 40 | |
# Number of cores to allocate. Required. | |
"num-cpu-cores" : 2, | |
# application.conf: 127 | |
"passthrough" : { | |
# application.conf: 128 | |
"spark" : { | |
# application.conf: 128 | |
"driver" : { | |
# application.conf: 128 | |
# Ignore the Multiple context exception related with SPARK-2243 | |
"allowMultipleContexts" : true | |
} | |
} | |
}, | |
# application.conf: 109 | |
"streaming" : { | |
# application.conf: 111 | |
# Default batch interval for Spark Streaming contexts in milliseconds | |
"batch_interval" : 1000, | |
# application.conf: 114 | |
# if true, stops gracefully by waiting for the processing of all received data to be completed | |
"stopGracefully" : true, | |
# application.conf: 118 | |
# if true, stops the SparkContext with the StreamingContext. The underlying SparkContext will be | |
# stopped regardless of whether the StreamingContext has been started. | |
"stopSparkContext" : true | |
} | |
}, | |
# application.conf: 91 | |
# predefined Spark contexts | |
# Below is an example, but do not uncomment it. Everything defined here is carried over to | |
# deploy-time configs, so they will be created in all environments. :( | |
"contexts" : {}, | |
# system properties | |
"driver" : { | |
# system properties | |
"extraJavaOptions" : "-XX:+UseConcMarkSweepGC\n -verbose:gc -XX:+PrintGCTimeStamps -Xloggc:/app/gc.out\n -XX:MaxPermSize=512m\n -XX:+CMSClassUnloadingEnabled -XX:MaxDirectMemorySize=512M -XX:+HeapDumpOnOutOfMemoryError -Djava.net.preferIPv4Stack=true -Dcom.sun.management.jmxremote.port=9999 -Dcom.sun.management.jmxremote.rmi.port=9999 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dlog4j.configuration=file:/app/log4j-server.properties\n -DLOG_DIR=/tmp/job-server -Dspark.executor.uri= ", | |
# system properties | |
"memory" : "1G" | |
}, | |
# system properties | |
"executor" : { | |
# system properties | |
"extraJavaOptions" : "-Dlog4j.configuration=file:/app/log4j-server.properties\n -DLOG_DIR=/tmp/job-server", | |
# system properties | |
"uri" : "" | |
}, | |
# /app/docker.conf: 57 | |
# This needs to match SPARK_HOME for cluster SparkContexts to be created successfully | |
"home" : "/usr/local/spark", | |
# system properties | |
"jars" : "file:/app/spark-job-server.jar", | |
# /app/docker.conf: 11 | |
# Default # of CPUs for jobs to use for Spark standalone cluster | |
"job-number-cpus" : 4, | |
# merge of /app/docker.conf: 13,application.conf: 7 | |
"jobserver" : { | |
# application.conf: 9 | |
"bind-address" : "0.0.0.0", | |
# application.conf: 70 | |
# Time out for job server to wait while creating contexts | |
"context-creation-timeout" : "60 s", | |
# /app/docker.conf: 17 | |
"context-per-jvm" : false, | |
# application.conf: 28 | |
"datadao" : { | |
# application.conf: 31 | |
# storage directory for files that are uploaded to the server | |
# via POST/data commands | |
"rootdir" : "/tmp/spark-jobserver/upload" | |
}, | |
# application.conf: 24 | |
"filedao" : { | |
# application.conf: 25 | |
"rootdir" : "/tmp/spark-jobserver/filedao/data" | |
}, | |
# application.conf: 15 | |
# Number of job results to keep per JobResultActor/context | |
"job-result-cache-size" : 5000, | |
# /app/docker.conf: 15 | |
"jobdao" : "spark.jobserver.io.JobSqlDAO", | |
# application.conf: 77 | |
# Number of jobs that can be run simultaneously per context | |
# If not set, defaults to number of cores on machine where jobserver is running | |
"max-jobs-per-context" : 8, | |
# application.conf: 73 | |
# Time out for job server to wait while creating named objects | |
"named-object-creation-timeout" : "60 s", | |
# /app/docker.conf: 14 | |
"port" : 8090, | |
# application.conf: 67 | |
# The ask pattern timeout for Api | |
"short-timeout" : "3 s", | |
# merge of /app/docker.conf: 19,application.conf: 40 | |
"sqldao" : { | |
# application.conf: 59 | |
# DB connection pool settings | |
"dbcp" : { | |
# application.conf: 62 | |
"initialsize" : 10, | |
# application.conf: 60 | |
"maxactive" : 20, | |
# application.conf: 61 | |
"maxidle" : 10 | |
}, | |
# merge of /app/docker.conf: 25,application.conf: 52 | |
# Full JDBC URL / init string, along with username and password. Sorry, needs to match above. | |
# Substitutions may be used to launch job-server, but leave it out here in the default or tests won't pass | |
"jdbc" : { | |
# application.conf: 55 | |
"password" : "", | |
# /app/docker.conf: 25 | |
# Full JDBC URL / init string. Sorry, needs to match above. | |
# Substitutions may be used to launch job-server, but leave it out here in the default or tests won't pass | |
"url" : "jdbc:h2:file:/database/h2-db", | |
# application.conf: 54 | |
"user" : "" | |
}, | |
# application.conf: 45 | |
# JDBC driver, full classpath | |
"jdbc-driver" : "org.h2.Driver", | |
# /app/docker.conf: 21 | |
# Directory where default H2 driver stores its data. Only needed for H2. | |
"rootdir" : "/database", | |
# application.conf: 42 | |
# Slick database driver, full classpath | |
"slick-driver" : "scala.slick.driver.H2Driver" | |
}, | |
# application.conf: 85 | |
# spark broadcast factory in yarn deployment | |
# Versions prior to 1.1.0, spark default broadcast factory is org.apache.spark.broadcast.HttpBroadcastFactory. | |
# Can't start multiple sparkContexts in the same JVM with HttpBroadcastFactory. | |
"yarn-broadcast-factory" : "org.apache.spark.broadcast.TorrentBroadcastFactory", | |
# application.conf: 80 | |
# in yarn deployment, time out for job server to wait while creating contexts | |
"yarn-context-creation-timeout" : "40 s" | |
}, | |
# /app/docker.conf: 7 | |
# | |
"master" : "local[4]", | |
# system properties | |
"submit" : { | |
# system properties | |
"deployMode" : "client" | |
}, | |
# application.conf: 5 | |
# spark web UI port | |
"webUrlPort" : 8080 | |
} | |
[2016-05-01 18:35:28,391] INFO spark.jobserver.JobServer$ [] [] - Spray config: { | |
# reference.conf: 1672 | |
# Enables/disables automatic back-pressure handling by write buffering and | |
# receive throttling | |
"automatic-back-pressure-handling" : "on", | |
# reference.conf: 1674 | |
"back-pressure" : { | |
# reference.conf: 1678 | |
# The reciprocal rate of requested Acks per NoAcks. E.g. the default value | |
# '10' means that every 10th write request is acknowledged. This affects the | |
# number of writes each connection has to buffer even in absence of back-pressure. | |
"noack-rate" : 10, | |
# reference.conf: 1683 | |
# The lower limit the write queue size has to shrink to before reads are resumed. | |
# Use 'infinite' to disable the low-watermark so that reading is resumed instantly | |
# after the next successful write. | |
"reading-low-watermark" : "infinite" | |
}, | |
# reference.conf: 1640 | |
# The time period within which the TCP binding process must be completed. | |
# Set to `infinite` to disable. | |
"bind-timeout" : "1s", | |
# reference.conf: 1559 | |
# The period during which a service must respond to a `ChunkedRequestStart` message | |
# with a `RegisterChunkHandler` message. During the registration period reading from | |
# the network is suspended. It is still possible that some chunks have already been | |
# received which will be buffered until the registration is received or the timeout is | |
# triggered. If the timeout is triggered the connection is immediately aborted. | |
"chunkhandler-registration-timeout" : "500 ms", | |
# reference.conf: 1602 | |
# Enables/disables an alternative response streaming mode that doesn't | |
# use `Transfer-Encoding: chunked` but rather renders the individual | |
# MessageChunks coming in from the application as parts of the original | |
# response entity. | |
# Enabling this mode causes all connections to be closed after a streaming | |
# response has been finished since there is no other way to signal the | |
# response end to the client. | |
# Note that chunkless-streaming is implicitly enabled when streaming | |
# responses to HTTP/1.0 clients (since they don't support | |
# `Transfer-Encoding: chunked`) | |
"chunkless-streaming" : "off", | |
# application.conf: 190 | |
# Needed for HTTP/1.0 requests with missing Host headers | |
"default-host-header" : "spray.io:8765", | |
# application.conf: 185 | |
# ssl engine provider protocols | |
"enabledProtocols" : [ | |
# application.conf: 185 | |
"SSLv3", | |
# application.conf: 185 | |
"TLSv1" | |
], | |
# application.conf: 180 | |
# see http://docs.oracle.com/javase/7/docs/technotes/guides/security/StandardNames.html#SSLContext for more examples | |
# typical are either SSL or TLS | |
"encryptionType" : "SSL", | |
# application.conf: 186 | |
"idle-timeout" : "60 s", | |
# application.conf: 181 | |
"keystoreType" : "JKS", | |
# reference.conf: 1636 | |
# For HTTPS connections this setting specified the maximum number of | |
# bytes that are encrypted in one go. Large responses are broken down in | |
# chunks of this size so as to already begin sending before the response has | |
# been encrypted entirely. | |
"max-encryption-chunk-size" : "1m", | |
# merge of application.conf: 193,reference.conf: 1824 | |
# The (default) configuration of the HTTP message parser for the server and | |
# the client. | |
# IMPORTANT: These settings (i.e. children of `spray.can.parsing`) can't be directly | |
# overridden in `application.conf` to change the parser settings for client and server | |
# altogether (see https://github.com/spray/spray/issues/346). Instead, override the | |
# concrete settings beneath `spray.can.server.parsing` and `spray.can.client.parsing` | |
# where these settings are copied to. | |
"parsing" : { | |
# reference.conf: 1861 | |
# limits for the number of different values per header type that the | |
# header cache will hold | |
"header-cache" : { | |
# reference.conf: 1863 | |
"Content-MD5" : 0, | |
# reference.conf: 1864 | |
"Date" : 0, | |
# reference.conf: 1865 | |
"If-Match" : 0, | |
# reference.conf: 1866 | |
"If-Modified-Since" : 0, | |
# reference.conf: 1867 | |
"If-None-Match" : 0, | |
# reference.conf: 1868 | |
"If-Range" : 0, | |
# reference.conf: 1869 | |
"If-Unmodified-Since" : 0, | |
# reference.conf: 1870 | |
"User-Agent" : 32, | |
# reference.conf: 1862 | |
"default" : 12 | |
}, | |
# reference.conf: 1857 | |
# Enables/disables the logging of warning messages in case an incoming | |
# message (request or response) contains an HTTP header which cannot be | |
# parsed into its high-level model class due to incompatible syntax. | |
# Note that, independently of this settings, spray will accept messages | |
# with such headers as long as the message as a whole would still be legal | |
# under the HTTP specification even without this header. | |
# If a header cannot be parsed into a high-level model instance it will be | |
# provided as a `RawHeader`. | |
"illegal-header-warnings" : "on", | |
# reference.conf: 1876 | |
# Sets the size starting from which incoming http-messages will be delivered | |
# in chunks regardless of whether chunking is actually used on the wire. | |
# Set to infinite to disable auto chunking. | |
"incoming-auto-chunking-threshold-size" : "infinite", | |
# reference.conf: 1832 | |
"max-chunk-ext-length" : 256, | |
# reference.conf: 1833 | |
"max-chunk-size" : "1m", | |
# application.conf: 193 | |
# Increase this in order to upload bigger job jars | |
"max-content-length" : "30m", | |
# reference.conf: 1830 | |
"max-header-count" : 64, | |
# reference.conf: 1828 | |
"max-header-name-length" : 64, | |
# reference.conf: 1829 | |
"max-header-value-length" : "8k", | |
# reference.conf: 1827 | |
"max-response-reason-length" : 64, | |
# reference.conf: 1826 | |
# The limits for the various parts of the HTTP message parser. | |
"max-uri-length" : "2k", | |
# reference.conf: 1881 | |
# Enables/disables inclusion of an SSL-Session-Info header in parsed | |
# messages over SSL transports (i.e., HttpRequest on server side and | |
# HttpResponse on client side). | |
"ssl-session-info-header" : "off", | |
# reference.conf: 1847 | |
# Sets the strictness mode for parsing request target URIs. | |
# The following values are defined: | |
# | |
# `strict`: RFC3986-compliant URIs are required, | |
# a 400 response is triggered on violations | |
# | |
# `relaxed`: all visible 7-Bit ASCII chars are allowed | |
# | |
# `relaxed-with-raw-query`: like `relaxed` but additionally | |
# the URI query is not parsed, but delivered as one raw string | |
# as the `key` value of a single Query structure element. | |
# | |
"uri-parsing-mode" : "strict" | |
}, | |
# reference.conf: 1658 | |
# The time after which a connection is aborted (RST) after a parsing error | |
# occurred. The timeout prevents a connection which is already known to be | |
# erroneous from receiving evermore data even if all of the data will be ignored. | |
# However, in case of a connection abortion the client usually doesn't properly | |
# receive the error response. This timeout is a trade-off which allows the client | |
# some time to finish its request and receive a proper error response before the | |
# connection is forcibly closed to free resources. | |
"parsing-error-abort-timeout" : "2s", | |
# application.conf: 188 | |
# for maximum performance (prevents StopReading / ResumeReading messages to the IOBridge) | |
"pipelining-limit" : 2, | |
# application.conf: 183 | |
# key manager factory provider | |
"provider" : "SunX509", | |
# reference.conf: 1582 | |
# Enables/disables the addition of a `Raw-Request-URI` header holding the | |
# original raw request URI as the client has sent it. | |
"raw-request-uri-header" : "off", | |
# reference.conf: 1569 | |
# The "granularity" of timeout checking for both idle connections timeouts | |
# as well as request timeouts, should rarely be needed to modify. | |
# If set to `infinite` request and connection timeout checking is disabled. | |
"reaping-cycle" : "250 ms", | |
# reference.conf: 1649 | |
# The time period within which a connection handler must have been | |
# registered after the bind handler has received a `Connected` event. | |
# Set to `infinite` to disable. | |
"registration-timeout" : "1s", | |
# reference.conf: 1578 | |
# Enables/disables the addition of a `Remote-Address` header | |
# holding the clients (remote) IP address. | |
"remote-address-header" : "off", | |
# reference.conf: 1625 | |
# If this setting is non-zero the HTTP server automatically aggregates | |
# incoming request chunks into full HttpRequests before dispatching them to | |
# the application. If the size of the aggregated requests surpasses the | |
# specified limit the server responds with a `413 Request Entity Too Large` | |
# error response before closing the connection. | |
# Set to zero to disable automatic request chunk aggregation and have | |
# ChunkedRequestStart, MessageChunk and ChunkedMessageEnd messages be | |
# dispatched to the handler. | |
"request-chunk-aggregation-limit" : "1m", | |
# application.conf: 187 | |
"request-timeout" : "40 s", | |
# reference.conf: 1630 | |
# The initial size of the buffer to render the response headers in. | |
# Can be used for fine-tuning response rendering performance but probably | |
# doesn't have to be fiddled with in most applications. | |
"response-header-size-hint" : 512, | |
# reference.conf: 1517-2132 | |
# Always contains the deployed version of spray. | |
# Referenced, for example, from the `spray.can.server.server-header` setting. | |
"server-header" : "spray-can/1.3.3", | |
# reference.conf: 1523 | |
# Enables/disables SSL encryption. | |
# If enabled the server uses the implicit `ServerSSLEngineProvider` member | |
# of the `Bind` command to create `SSLEngine` instances for the underlying | |
# IO connection. | |
"ssl-encryption" : "off", | |
# reference.conf: 1687 | |
# Enables more verbose DEBUG logging for debugging SSL related issues. | |
"ssl-tracing" : "off", | |
# reference.conf: 1574 | |
# Enables/disables support for statistics collection and querying. | |
# Even though stats keeping overhead is small, | |
# for maximum performance switch off when not needed. | |
"stats-support" : "on", | |
# reference.conf: 1564 | |
# The path of the actor to send `spray.http.Timedout` messages to. | |
# If empty all `Timedout` messages will go to the "regular" request | |
# handling actor. | |
"timeout-handler" : "", | |
# reference.conf: 1552 | |
# After a `Timedout` message has been sent to the timeout handler and the | |
# request still hasn't been completed after the time period set here | |
# the server will complete the request itself with an error response. | |
# Set to `infinite` to disable timeout timeouts. | |
"timeout-timeout" : "2 s", | |
# reference.conf: 1590 | |
# Enables/disables automatic handling of HEAD requests. | |
# If this setting is enabled the server dispatches HEAD requests as GET | |
# requests to the application and automatically strips off all message | |
# bodies from outgoing responses. | |
# Note that, even when this setting is off the server will never send | |
# out message bodies on responses to HEAD requests. | |
"transparent-head-requests" : "on", | |
# reference.conf: 1644 | |
# The time period within which the TCP unbinding process must be completed. | |
# Set to `infinite` to disable. | |
"unbind-timeout" : "1s", | |
# reference.conf: 1615 | |
# Enables/disables the logging of the full (potentially multiple line) | |
# error message to the server logs. | |
# If disabled only a single line will be logged. | |
"verbose-error-logging" : "off", | |
# reference.conf: 1610 | |
# Enables/disables the returning of more detailed error messages to | |
# the client in the error response. | |
# Should be disabled for browser-facing APIs due to the risk of XSS attacks | |
# and (probably) enabled for internal or non-browser APIs. | |
# Note that spray will always produce log messages containing the full | |
# error details. | |
"verbose-error-messages" : "off" | |
} | |
[2016-05-01 18:35:29,311] INFO ka.event.slf4j.Slf4jLogger [] [] - Slf4jLogger started | |
[2016-05-01 18:35:29,422] INFO Remoting [] [Remoting] - Starting remoting | |
[2016-05-01 18:35:29,671] INFO Remoting [] [Remoting] - Remoting started; listening on addresses :[akka.tcp://JobServer@127.0.0.1:45095] | |
[2016-05-01 18:35:29,697] INFO Cluster(akka://JobServer) [] [Cluster(akka://JobServer)] - Cluster Node [akka.tcp://JobServer@127.0.0.1:45095] - Starting up... | |
Uncaught error from thread [JobServer-akka.actor.default-dispatcher-5] shutting down JVM since 'akka.jvm-exit-on-fatal-error' is enabled for ActorSystem[JobServer] | |
java.lang.NoClassDefFoundError: scala/runtime/AbstractPartialFunction$mcVL$sp | |
at java.lang.ClassLoader.defineClass1(Native Method) | |
at java.lang.ClassLoader.defineClass(ClassLoader.java:763) | |
at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142) | |
at java.net.URLClassLoader.defineClass(URLClassLoader.java:467) | |
at java.net.URLClassLoader.access$100(URLClassLoader.java:73) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:368) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:362) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:361) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
at akka.cluster.ClusterDaemon.receive(ClusterDaemon.scala:166) | |
at akka.actor.ActorCell.newActor(ActorCell.scala:558) | |
at akka.actor.ActorCell.create(ActorCell.scala:578) | |
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:456) | |
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:478) | |
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:263) | |
at akka.dispatch.Mailbox.run(Mailbox.scala:219) | |
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397) | |
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260) | |
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339) | |
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979) | |
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107) | |
Caused by: java.lang.ClassNotFoundException: scala.runtime.AbstractPartialFunction$mcVL$sp | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:381) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
... 23 more | |
[2016-05-01 18:35:29,789] ERROR akka.actor.ActorSystemImpl [] [ActorSystem(JobServer)] - Uncaught error from thread [JobServer-akka.actor.default-dispatcher-5] shutting down JVM since 'akka.jvm-exit-on-fatal-error' is enabled | |
java.lang.NoClassDefFoundError: scala/runtime/AbstractPartialFunction$mcVL$sp | |
at java.lang.ClassLoader.defineClass1(Native Method) | |
at java.lang.ClassLoader.defineClass(ClassLoader.java:763) | |
at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142) | |
at java.net.URLClassLoader.defineClass(URLClassLoader.java:467) | |
at java.net.URLClassLoader.access$100(URLClassLoader.java:73) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:368) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:362) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:361) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
at akka.cluster.ClusterDaemon.receive(ClusterDaemon.scala:166) | |
at akka.actor.ActorCell.newActor(ActorCell.scala:558) | |
at akka.actor.ActorCell.create(ActorCell.scala:578) | |
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:456) | |
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:478) | |
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:263) | |
at akka.dispatch.Mailbox.run(Mailbox.scala:219) | |
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397) | |
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260) | |
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339) | |
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979) | |
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107) | |
Caused by: java.lang.ClassNotFoundException: scala.runtime.AbstractPartialFunction$mcVL$sp | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:381) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
... 23 more | |
[2016-05-01 18:35:29,815] ERROR ka.actor.OneForOneStrategy [] [akka://JobServer/system/cluster] - head of empty list | |
java.util.NoSuchElementException: head of empty list | |
at scala.collection.immutable.Nil$.head(List.scala:420) | |
at scala.collection.immutable.Nil$.head(List.scala:417) | |
at akka.actor.ActorCell.receiveMessage(ActorCell.scala:516) | |
at akka.actor.ActorCell.invoke(ActorCell.scala:487) | |
at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:238) | |
at akka.dispatch.Mailbox.run(Mailbox.scala:220) | |
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397) | |
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260) | |
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339) | |
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979) | |
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107) | |
Uncaught error from thread [JobServer-akka.actor.default-dispatcher-6] shutting down JVM since 'akka.jvm-exit-on-fatal-error' is enabled for ActorSystem[JobServer] | |
java.lang.NoClassDefFoundError: scala/runtime/AbstractPartialFunction$mcVL$sp | |
at java.lang.ClassLoader.defineClass1(Native Method) | |
at java.lang.ClassLoader.defineClass(ClassLoader.java:763) | |
at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142) | |
at java.net.URLClassLoader.defineClass(URLClassLoader.java:467) | |
at java.net.URLClassLoader.access$100(URLClassLoader.java:73) | |
[2016-05-01 18:35:29,822] ERROR akka.actor.ActorCell [] [akka://JobServer/system/cluster] - changing Recreate into Create after java.util.NoSuchElementException: head of empty list | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:368) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:362) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:361) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
at akka.cluster.ClusterCoreSupervisor.receive(ClusterDaemon.scala:208) | |
at akka.actor.ActorCell.newActor(ActorCell.scala:558) | |
at akka.actor.ActorCell.create(ActorCell.scala:578) | |
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:456) | |
[2016-05-01 18:35:29,825] ERROR akka.actor.ActorSystemImpl [] [ActorSystem(JobServer)] - Uncaught error from thread [JobServer-akka.actor.default-dispatcher-6] shutting down JVM since 'akka.jvm-exit-on-fatal-error' is enabled | |
java.lang.NoClassDefFoundError: scala/runtime/AbstractPartialFunction$mcVL$sp | |
at java.lang.ClassLoader.defineClass1(Native Method) | |
at java.lang.ClassLoader.defineClass(ClassLoader.java:763) | |
at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142) | |
at java.net.URLClassLoader.defineClass(URLClassLoader.java:467) | |
at java.net.URLClassLoader.access$100(URLClassLoader.java:73) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:368) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:362) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:361) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
at akka.cluster.ClusterCoreSupervisor.receive(ClusterDaemon.scala:208) | |
at akka.actor.ActorCell.newActor(ActorCell.scala:558) | |
at akka.actor.ActorCell.create(ActorCell.scala:578) | |
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:456) | |
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:478) | |
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:263) | |
at akka.dispatch.Mailbox.run(Mailbox.scala:219) | |
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397) | |
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260) | |
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339) | |
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979) | |
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107) | |
Caused by: java.lang.ClassNotFoundException: scala.runtime.AbstractPartialFunction$mcVL$sp | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:381) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
... 23 more | |
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:478) | |
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:263) | |
at akka.dispatch.Mailbox.run(Mailbox.scala:219) | |
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397) |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment