Skip to content

Instantly share code, notes, and snippets.

View JoshRosen's full-sized avatar

Josh Rosen JoshRosen

View GitHub Profile
package org.apache.spark.sql.catalyst.expressions.codegen
import org.codehaus.janino.SimpleCompiler
object CodeGenBenchmark {
def quasiquotes(): Unit = {
import scala.reflect.runtime.{universe => ru}
import scala.reflect.runtime.universe._
[run]
branch = true
parallel = true
data_file = ${COVERAGE_DIR}/coverage_data/coverage
[html]
ignore_errors = true
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.serializers.JavaSerializer
import com.esotericsoftware.kryo.io.{Input, Output}
/**
 * Empty marker class that mixes in nothing but `Serializable`; it carries no
 * state or behavior. NOTE(review): given the surrounding Kryo imports, this
 * presumably exists so the Kryo test can register/serialize a plain
 * Java-serializable type — confirm against the full KryoTest harness.
 */
class JavaSerializable extends Serializable {
}
object KryoTest {
def main(args: Array[String]): Unit = {
val kryo = new Kryo()
package org.apache.spark.sql
import java.io.File
import org.apache.spark.sql.catalyst.expressions.GenericRow
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructType
import scala.util.Random
package org.apache.spark.sql
import java.io.File
import org.apache.spark.sql.catalyst.expressions.GenericRow
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructType
import scala.util.Random
package org.apache.spark.shuffle.unsafe;
import org.apache.spark.JavaAPISuite;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
@RunWith(Suite.class)
@Suite.SuiteClasses({
UnsafeShuffleWriterSuite.class,
JavaAPISuite.class
package org.apache.spark.sql
import org.apache.spark.unsafe.PlatformDependent
import org.apache.spark.unsafe.map.BytesToBytesMap
import org.apache.spark.unsafe.memory.{MemoryAllocator, TaskMemoryManager, ExecutorMemoryManager}
import scala.util.Random
/**
* This benchmark measures the time to iterate over a BytesToBytesMap.
import java.io.{PrintWriter, FileOutputStream, File}
import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.scheduler.{SparkListenerStageSubmitted, SparkListenerTaskEnd, SparkListener}
/**
* Extends [[ShuffledRDD]] to skip the shuffle fetching.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.expressions.{UnsafeFixedWidthAggregationMap, SpecificMutableRow, MutableRow, GenericRow}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.memory.MemoryAllocator
import org.openjdk.jmh.annotations._
import scala.util.Random
package org.apache.spark.sql
import org.apache.spark.unsafe.memory.{MemoryAllocator, TaskMemoryManager, ExecutorMemoryManager}
import scala.util.Random
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._
/**