import java.sql.Connection
import akka.actor.Actor

val pool = new BoneCP(config) // config: a BoneCPConfig built elsewhere
val getConn = () => pool.getConnection()
val relConn = (c: Connection) => pool.releaseConnection(c)

// The actor only sees acquire/release functions, never the pool itself:
class MyActor(get: () => Connection, rel: Connection => Unit) extends Actor {
  lazy val c = get()

  // Hand the connection back to the pool before the supervisor restarts us:
  override def preRestart(why: Throwable, msg: Option[Any]): Unit = {
    rel(c)
  }

  // receive is elided in the original fragment:
  def receive = { case _ => () }
}
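A one-line wiring sketch; the system and actor names here are mine, for illustration only:

import akka.actor.{ActorSystem, Props}

val system = ActorSystem("db-example") // name assumed
val myActor = system.actorOf(Props(new MyActor(getConn, relConn)))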
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout

trait PersonClient {
  // Supply a router with a pool of PersonDao:
  val personPool: ActorRef

  // How long should we wait for a response from PersonDao:
  val timeoutInMillis: Long

  implicit val timeout = Timeout(timeoutInMillis.millis)

  // The body is truncated in the original; the ask pattern is the natural fit:
  def addPerson(p: Person): Future[Int] =
    (personPool ? p).mapTo[Int]
}
case class Person(name: String, email: String)
case class PersonById(id: Int)
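A hypothetical concrete client, assuming the getConn factory from the first snippet; the names here are mine, not the original's:

import akka.actor.{ActorSystem, Props}
import akka.routing.RoundRobinPool

class PersonService(sys: ActorSystem) extends PersonClient {
  // A router with 4 PersonDao routees, mirroring the pool snippets below:
  val personPool = sys.actorOf(RoundRobinPool(4).props(Props(new PersonDao(getConn))))
  val timeoutInMillis = 1000L // 1s, an arbitrary choice
}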
class PersonDao(cf: () => Connection) extends Actor {
  lazy val conn = cf()

  // Close the connection before a restart, ignoring close-time failures:
  override def preRestart(why: Throwable, msg: Option[Any]): Unit =
    try { conn.close() } catch { case _: Exception => () }

  // The handlers are truncated in the original; the queries themselves are elided:
  def receive = {
    case p: Person      => // INSERT p via conn, reply to sender() with the new id
    case PersonById(id) => // SELECT via conn, reply to sender() with the Person
  }
}
val props = Props(new BasicJdbcActor(connFac))
  .withDispatcher("my-dispatcher")
val pool = RoundRobinPool(4, supervisorStrategy = restartStrategy)
sys.actorOf(pool.props(props))
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy.Restart
import scala.concurrent.duration._

// Very naive, be more specific based on your problem:
val restartStrategy = OneForOneStrategy(
  maxNrOfRetries = 10,
  withinTimeRange = 1.minute) {
  case _ => Restart
}
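One way to act on that comment: restart only for failures a fresh connection can heal, and escalate the rest. A sketch; the exception choice is an assumption, not from the original:

import java.sql.SQLException
import akka.actor.SupervisorStrategy.{Escalate, Restart}

val pickyStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1.minute) {
  case _: SQLException => Restart  // a restarted routee acquires a fresh connection
  case _               => Escalate // anything else is not ours to handle
}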
def newPool(sys: ActorSystem): ActorRef = {
  val props = Props(new BasicJdbcActor(connFac))
    .withDispatcher("my-dispatcher")
  val pool = RoundRobinPool(4, supervisorStrategy = restartStrategy)
  sys.actorOf(pool.props(props))
}
my-dispatcher {
  type = Dispatcher
  executor = "fork-join-executor"
  fork-join-executor {
    parallelism-min = 2
    // 2 threads per core:
    parallelism-factor = 2.0
    // The max that the dispatcher will create (value assumed; truncated in the original):
    parallelism-max = 10
  }
}
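For the "my-dispatcher" lookup above to succeed, the block has to be on the classpath. A minimal loading sketch, assuming it sits in application.conf:

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

// ConfigFactory.load() reads application.conf from the classpath by default:
val sys = ActorSystem("jdbc-example", ConfigFactory.load())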
// JDBC URLs need the "jdbc:" scheme prefix:
val databaseUrl = "jdbc:postgresql://some-hostname:5432/db-name"
// Load your JDBC driver so DriverManager can find it:
Class.forName("my.sql.database.driver.classname")
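A plausible shape for the connFac passed to BasicJdbcActor below; a sketch, since the original's factory is not shown (the BoneCP getConn above would work just as well):

import java.sql.{Connection, DriverManager}

val connFac: () => Connection = () => DriverManager.getConnection(databaseUrl)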
class BasicJdbcActor(connFac: () => Connection) extends Actor {
  lazy val conn = connFac()

  override def preRestart(why: Throwable, msg: Option[Any]): Unit =
    try { conn.close() } catch { case _: Exception => () }

  // receive is truncated in the original fragment:
  def receive = { case _ => () }
}
j14159 / gist:d3cbe172f7b962d74d09 (created July 18, 2014 20:40)
Naive/early S3N RDD for Spark
/**
 * Started to rough this naive S3-native filesystem RDD out because I need to use IAM
 * profiles for S3 access and also https://issues.apache.org/jira/browse/HADOOP-3733.
 *
 * Use at your own risk; bear in mind this is maybe 30-45 min of work and testing, and
 * expect it to behave as such.
 *
 * Feedback/criticism/discussion welcome via GitHub/Twitter.
 *
 * In addition to Spark 1.0.x, this depends on Amazon's S3 SDK; the dependency is as follows:
package definterp

import org.scalatest._

class AdditionProgram extends FlatSpec with Matchers {
  val addition =
    Lambda("x",
      Lambda("y", LetRec("add-until",
        Lambda("xx",