@kisimple · Last active March 17, 2017
//// spark.sql("SELECT name FROM people WHERE age > 18 ORDER BY age LIMIT 20")
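//// Physical plan (reconstructed from the code below, not part of the original gist):
////   TakeOrderedAndProject(limit=20, orderBy=[age ASC], output=[name])
////   +- *Project [name, age]
////      +- *Filter (isnotnull(age) && (age > 18))
////         +- Scan over (age: bigint, name: string)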
package org.apache.spark.sql.catalyst.expressions;

import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.catalyst.expressions.UnsafeRow;
import org.apache.spark.unsafe.types.UTF8String;

// Whole-stage-codegen output for the Scan -> Filter -> Project subtree of the
// query above (the ORDER BY ... LIMIT runs downstream, outside this stage).
// Spark compiles this source with Janino at runtime and calls generate() with
// the `references` array, which here carries the two SQLMetric counters.
public class GeneratedClass extends
    org.apache.spark.sql.catalyst.expressions.codegen.GeneratedClass {

  public Object generate(Object[] references) {
    return new GeneratedIterator(references);
  }
  // One fused iterator for the whole stage: it pulls rows from the scan,
  // filters on age, and projects (name, age) without per-operator row copies.
  final class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
    private Object[] references;
    private scala.collection.Iterator[] inputs;
    private org.apache.spark.sql.execution.metric.SQLMetric scan_numOutputRows;
    private scala.collection.Iterator scan_input;
    private org.apache.spark.sql.execution.metric.SQLMetric filter_numOutputRows;
    // The filter_* buffers are emitted by the code generator but never used in
    // processNext(): surviving rows are written straight into project_result.
    private UnsafeRow filter_result;
    private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder filter_holder;
    private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter filter_rowWriter;
    private UnsafeRow project_result;
    private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder project_holder;
    private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter project_rowWriter;

    public GeneratedIterator(Object[] references) {
      this.references = references;
    }

    public void init(int index, scala.collection.Iterator[] inputs) {
      partitionIndex = index;
      this.inputs = inputs;
      this.scan_numOutputRows = (org.apache.spark.sql.execution.metric.SQLMetric) references[0];
      scan_input = inputs[0];
      this.filter_numOutputRows = (org.apache.spark.sql.execution.metric.SQLMetric) references[1];
      // Two-field UnsafeRows; 32 bytes of initial BufferHolder space is
      // reserved for variable-length data (the name string).
      filter_result = new UnsafeRow(2);
      this.filter_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(filter_result, 32);
      this.filter_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(filter_holder, 2);
      project_result = new UnsafeRow(2);
      this.project_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(project_result, 32);
      this.project_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(project_holder, 2);
    }
    // The fused pipeline: scan -> filter -> project, one row per iteration.
    protected void processNext() throws java.io.IOException {
      while (scan_input.hasNext()) {
        InternalRow scan_row = (InternalRow) scan_input.next();
        scan_numOutputRows.add(1);
        // Read age (ordinal 0 of the scanned row).
        boolean scan_isNull2 = scan_row.isNullAt(0);
        long scan_value2 = scan_isNull2 ? -1L : (scan_row.getLong(0));
        // `age > 18` implies age IS NOT NULL, so null ages are dropped first.
        if (!(!(scan_isNull2))) continue;
        boolean filter_isNull2 = false;
        boolean filter_value2 = false;
        filter_value2 = scan_value2 > 18L;
        if (!filter_value2) continue;
        filter_numOutputRows.add(1);
        // Read name (ordinal 1) only for rows that survived the filter.
        boolean scan_isNull3 = scan_row.isNullAt(1);
        UTF8String scan_value3 = scan_isNull3 ? null : (scan_row.getUTF8String(1));
        // Project (name, age) into an UnsafeRow; age is kept because the
        // downstream ORDER BY still needs it.
        project_holder.reset();
        project_rowWriter.zeroOutNullBytes();
        if (scan_isNull3) {
          project_rowWriter.setNullAt(0);
        } else {
          project_rowWriter.write(0, scan_value3);
        }
        project_rowWriter.write(1, scan_value2);
        project_result.setTotalSize(project_holder.totalSize());
        // Hand the finished row to BufferedRowIterator's output buffer.
        append(project_result);
        if (shouldStop()) return;
      }
    }
  }
}
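To reproduce a dump like this, Spark can print the generated source for every whole-stage-codegen subtree of a query. A minimal sketch, assuming a Spark 2.1-era spark-shell and a hypothetical people(name STRING, age BIGINT) table already registered as a view:

import org.apache.spark.sql.execution.debug._

// Hypothetical setup: a `people` table/view must already exist.
val df = spark.sql("SELECT name FROM people WHERE age > 18 ORDER BY age LIMIT 20")

df.explain()      // physical plan: TakeOrderedAndProject above the codegen'd stage
df.debugCodegen() // prints generated Java source like the class above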