
@andrewor14
Created June 24, 2016 22:04
// Code generated by ColumnarBatchScan.scala when reading the in-memory column
// buffers of an InMemoryTableScan (hence the inmemorytablescan_ variable prefix).
public Object generate(Object[] references) {
  return new GeneratedIterator(references);
}

final class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
  private Object[] references;
  private scala.collection.Iterator inmemorytablescan_input;
  private org.apache.spark.sql.execution.metric.SQLMetric inmemorytablescan_numOutputRows;
  private org.apache.spark.sql.execution.metric.SQLMetric inmemorytablescan_scanTime;
  private long inmemorytablescan_scanTime1;
  private org.apache.spark.sql.execution.vectorized.ColumnarBatch inmemorytablescan_batch;
  private int inmemorytablescan_batchIdx;
  private org.apache.spark.sql.execution.vectorized.ColumnVector inmemorytablescan_colInstance0;
  private org.apache.spark.sql.execution.vectorized.ColumnVector inmemorytablescan_colInstance1;
  private UnsafeRow inmemorytablescan_result;
  private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder inmemorytablescan_holder;
  private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter inmemorytablescan_rowWriter;

  public GeneratedIterator(Object[] references) {
    this.references = references;
  }

  // Wire up the input iterator, the two SQL metrics, and a writer for the
  // two-field output row.
  public void init(int index, scala.collection.Iterator[] inputs) {
    partitionIndex = index;
    inmemorytablescan_input = inputs[0];
    this.inmemorytablescan_numOutputRows = (org.apache.spark.sql.execution.metric.SQLMetric) references[0];
    this.inmemorytablescan_scanTime = (org.apache.spark.sql.execution.metric.SQLMetric) references[1];
    inmemorytablescan_scanTime1 = 0;
    inmemorytablescan_batch = null;
    inmemorytablescan_batchIdx = 0;
    inmemorytablescan_colInstance0 = null;
    inmemorytablescan_colInstance1 = null;
    inmemorytablescan_result = new UnsafeRow(2);
    this.inmemorytablescan_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(inmemorytablescan_result, 0);
    this.inmemorytablescan_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(inmemorytablescan_holder, 2);
  }

  // Fetch the next ColumnarBatch, if any, and cache references to its two
  // columns; time spent here is accumulated into the scan-time metric.
  private void inmemorytablescan_nextBatch() throws java.io.IOException {
    long getBatchStart = System.nanoTime();
    if (inmemorytablescan_input.hasNext()) {
      inmemorytablescan_batch = (org.apache.spark.sql.execution.vectorized.ColumnarBatch) inmemorytablescan_input.next();
      inmemorytablescan_numOutputRows.add(inmemorytablescan_batch.numRows());
      inmemorytablescan_batchIdx = 0;
      inmemorytablescan_colInstance0 = inmemorytablescan_batch.column(0);
      inmemorytablescan_colInstance1 = inmemorytablescan_batch.column(1);
    }
    inmemorytablescan_scanTime1 += System.nanoTime() - getBatchStart;
  }

  // For every row of every batch: read column 0 (a non-nullable long) and
  // column 1 (a nullable long), pack them into an UnsafeRow, and hand the
  // row to the parent operator via append().
  protected void processNext() throws java.io.IOException {
    if (inmemorytablescan_batch == null) {
      inmemorytablescan_nextBatch();
    }
    while (inmemorytablescan_batch != null) {
      int numRows = inmemorytablescan_batch.numRows();
      while (inmemorytablescan_batchIdx < numRows) {
        int inmemorytablescan_rowIdx = inmemorytablescan_batchIdx++;
        long inmemorytablescan_value = inmemorytablescan_colInstance0.getLong(inmemorytablescan_rowIdx);
        boolean inmemorytablescan_isNull = inmemorytablescan_colInstance1.isNullAt(inmemorytablescan_rowIdx);
        long inmemorytablescan_value1 = inmemorytablescan_isNull ? -1L : (inmemorytablescan_colInstance1.getLong(inmemorytablescan_rowIdx));
        inmemorytablescan_rowWriter.zeroOutNullBytes();

        inmemorytablescan_rowWriter.write(0, inmemorytablescan_value);

        if (inmemorytablescan_isNull) {
          inmemorytablescan_rowWriter.setNullAt(1);
        } else {
          inmemorytablescan_rowWriter.write(1, inmemorytablescan_value1);
        }
        append(inmemorytablescan_result);
        if (shouldStop()) return;
      }
      inmemorytablescan_batch = null;
      inmemorytablescan_nextBatch();
    }
    // Report accumulated scan time in milliseconds (nanoTime deltas / 1e6).
    inmemorytablescan_scanTime.add(inmemorytablescan_scanTime1 / (1000 * 1000));
    inmemorytablescan_scanTime1 = 0;
  }
}
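
Output like the above can be dumped by caching a two-column table and printing the whole-stage-codegen source. Below is a minimal sketch, not part of the original gist, assuming Spark 2.x with the debugCodegen() helper from org.apache.spark.sql.execution.debug; the table shape (one non-nullable long plus one nullable long) is chosen to match the getLong/isNullAt pattern in the generated code, and the exact plan and source will vary with the Spark version and whether the in-memory scan supports columnar reads.

// Hypothetical reproduction script (Scala); names and shapes are illustrative.
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.debug._ // adds debugCodegen() to Dataset

object DumpColumnarScanCodegen {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("dump-columnar-scan-codegen")
      .getOrCreate()

    // Two long columns: "id" is non-nullable, "value" is nullable, matching
    // the two-field UnsafeRow written by the generated code above.
    val df = spark.range(0L, 1000L)
      .selectExpr("id", "if(id % 10 = 0, null, id) AS value")

    df.cache()
    df.count()        // materialize the in-memory column buffers

    df.debugCodegen() // print the generated Java source for the cached scan

    spark.stop()
  }
}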