Skip to content

Instantly share code, notes, and snippets.

@JoshRosen
Created May 20, 2019 15:01
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save JoshRosen/416eac9d35bd027475cc787100b0bfe5 to your computer and use it in GitHub Desktop.
// Entry point invoked by Spark's codegen framework after compiling this source:
// returns the generated iterator, handing it the `references` array of objects
// captured at planning time (SQL metrics, the UDF, and an error-message string).
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIteratorForCodegenStage1(references);
/* 003 */ }
/* 004 */
/* 005 */ // codegenStageId=1
// Whole-stage-codegen iterator for a plan of shape: Range(start=0, numElements=10000,
// step=1, slices=4) -> Project(udf(cast(value as int))). It produces each long of this
// partition's slice, narrows it to int, applies a Scala UDF, and emits one-column
// UnsafeRows via append(). Contents of `references` (as shown by the inline comments
// below): [0] numOutputRows SQLMetric, [2] errMsg String used when the UDF throws,
// [3] the UDF as a scala.Function1.
/* 006 */ final class GeneratedIteratorForCodegenStage1 extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 007 */ private Object[] references;
/* 008 */ private scala.collection.Iterator[] inputs; // stored in init(); never read here (Range is a leaf source)
/* 009 */ private boolean range_initRange_0; // true once initRange() has run for this partition
/* 010 */ private long range_nextIndex_0; // next range value to emit
/* 011 */ private TaskContext range_taskContext_0;
/* 012 */ private InputMetrics range_inputMetrics_0;
/* 013 */ private long range_batchEnd_0; // exclusive end of the current (up to 1000-element) batch
/* 014 */ private long range_numElementsTodo_0; // elements remaining beyond the current batch
// Two row writers for a single fixed-width field (1 column, 0 bytes variable data);
// only index [1] is written to in processNext() below.
/* 015 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter[] range_mutableStateArray_0 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter[2];
/* 016 */
/* 017 */ public GeneratedIteratorForCodegenStage1(Object[] references) {
/* 018 */ this.references = references;
/* 019 */ }
/* 020 */
// Per-partition setup called by the framework before the first processNext().
/* 021 */ public void init(int index, scala.collection.Iterator[] inputs) {
/* 022 */ partitionIndex = index; // field inherited from BufferedRowIterator
/* 023 */ this.inputs = inputs;
/* 024 */
/* 025 */ range_taskContext_0 = TaskContext.get();
/* 026 */ range_inputMetrics_0 = range_taskContext_0.taskMetrics().inputMetrics();
/* 027 */ range_mutableStateArray_0[0] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
/* 028 */ range_mutableStateArray_0[1] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
/* 029 */
/* 030 */ }
/* 031 */
// Computes this partition's half-open slice of the global range:
//   start_i = idx      * numElement / numSlice * step + start
//   end_i   = (idx+1)  * numElement / numSlice * step + start
// BigInteger is used so the intermediate products cannot overflow; each result is
// then clamped into [Long.MIN_VALUE, Long.MAX_VALUE] before being stored. Sets
// range_nextIndex_0 / range_batchEnd_0 to the slice start and
// range_numElementsTodo_0 to the element count (rounded up on a partial step).
/* 032 */ private void initRange(int idx) {
/* 033 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 034 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(4L);
/* 035 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(10000L);
/* 036 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 037 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 038 */ long partitionEnd;
/* 039 */
/* 040 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 041 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 042 */ range_nextIndex_0 = Long.MAX_VALUE;
/* 043 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 044 */ range_nextIndex_0 = Long.MIN_VALUE;
/* 045 */ } else {
/* 046 */ range_nextIndex_0 = st.longValue();
/* 047 */ }
// Start with an empty batch (nextIndex == batchEnd) so processNext() refills immediately.
/* 048 */ range_batchEnd_0 = range_nextIndex_0;
/* 049 */
/* 050 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 051 */ .multiply(step).add(start);
/* 052 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 053 */ partitionEnd = Long.MAX_VALUE;
/* 054 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 055 */ partitionEnd = Long.MIN_VALUE;
/* 056 */ } else {
/* 057 */ partitionEnd = end.longValue();
/* 058 */ }
/* 059 */
/* 060 */ java.math.BigInteger startToEnd = java.math.BigInteger.valueOf(partitionEnd).subtract(
/* 061 */ java.math.BigInteger.valueOf(range_nextIndex_0));
/* 062 */ range_numElementsTodo_0 = startToEnd.divide(step).longValue();
/* 063 */ if (range_numElementsTodo_0 < 0) {
/* 064 */ range_numElementsTodo_0 = 0;
/* 065 */ } else if (startToEnd.remainder(step).compareTo(java.math.BigInteger.valueOf(0L)) != 0) {
/* 066 */ range_numElementsTodo_0++; // partial last step still yields one element
/* 067 */ }
/* 068 */ }
/* 069 */
// Main produce loop. Emits rows in batches of up to 1000 range values; after each
// batch it updates numOutputRows/recordsRead and checks for task interruption. If the
// consumer signals shouldStop() mid-batch, it records the partial progress (count of
// rows emitted so far in this batch) and returns, resuming from range_nextIndex_0 on
// the next call.
/* 070 */ protected void processNext() throws java.io.IOException {
/* 071 */ // initialize Range
/* 072 */ if (!range_initRange_0) {
/* 073 */ range_initRange_0 = true;
/* 074 */ initRange(partitionIndex);
/* 075 */ }
/* 076 */
/* 077 */ while (true) {
// Current batch exhausted: carve the next batch (at most 1000 elements) out of
// range_numElementsTodo_0, or break when the partition is fully consumed.
/* 078 */ if (range_nextIndex_0 == range_batchEnd_0) {
/* 079 */ long range_nextBatchTodo_0;
/* 080 */ if (range_numElementsTodo_0 > 1000L) {
/* 081 */ range_nextBatchTodo_0 = 1000L;
/* 082 */ range_numElementsTodo_0 -= 1000L;
/* 083 */ } else {
/* 084 */ range_nextBatchTodo_0 = range_numElementsTodo_0;
/* 085 */ range_numElementsTodo_0 = 0;
/* 086 */ if (range_nextBatchTodo_0 == 0) break;
/* 087 */ }
/* 088 */ range_batchEnd_0 += range_nextBatchTodo_0 * 1L; // * 1L == * step
/* 089 */ }
/* 090 */
/* 091 */ int range_localEnd_0 = (int)((range_batchEnd_0 - range_nextIndex_0) / 1L);
/* 092 */ for (int range_localIdx_0 = 0; range_localIdx_0 < range_localEnd_0; range_localIdx_0++) {
/* 093 */ long range_value_0 = ((long)range_localIdx_0 * 1L) + range_nextIndex_0;
/* 094 */
// Cast long range value to int for the UDF input. The `if (!false)` is generated
// boilerplate for a child expression that is statically known to be non-null.
/* 095 */ boolean project_isNull_1 = false;
/* 096 */ int project_value_1 = -1;
/* 097 */ if (!false) {
/* 098 */ project_value_1 = (int) range_value_0;
/* 099 */ }
/* 100 */ Integer project_arg_0 = project_isNull_1 ? null : project_value_1;
/* 101 */
// Invoke the UDF; any exception is wrapped in a SparkException carrying the
// planner-provided error message (references[2]) with the original as cause.
/* 102 */ Integer project_result_0 = null;
/* 103 */ try {
/* 104 */ project_result_0 = (Integer)((scala.Function1) references[3] /* udf */).apply(project_arg_0);
/* 105 */ } catch (Exception e) {
/* 106 */ throw new org.apache.spark.SparkException(((java.lang.String) references[2] /* errMsg */), e);
/* 107 */ }
/* 108 */
/* 109 */ boolean project_isNull_0 = project_result_0 == null;
/* 110 */ int project_value_0 = -1;
/* 111 */ if (!project_isNull_0) {
/* 112 */ project_value_0 = project_result_0;
/* 113 */ }
// Write the single int column and hand the row to the consumer.
// NOTE(review): when project_isNull_0 is true, the row is written with value -1 and
// no setNullAt(0) call after zeroOutNullBytes() — a null UDF result appears to be
// emitted as non-null -1. Confirm against the plan's declared nullability.
/* 114 */ range_mutableStateArray_0[1].reset();
/* 115 */
/* 116 */ range_mutableStateArray_0[1].zeroOutNullBytes();
/* 117 */
/* 118 */ range_mutableStateArray_0[1].write(0, project_value_0);
/* 119 */ append((range_mutableStateArray_0[1].getRow()));
/* 120 */
// Consumer buffer full: persist progress (next value + metrics for the
// range_localIdx_0 + 1 rows emitted this batch) and yield.
/* 121 */ if (shouldStop()) {
/* 122 */ range_nextIndex_0 = range_value_0 + 1L;
/* 123 */ ((org.apache.spark.sql.execution.metric.SQLMetric) references[0] /* numOutputRows */).add(range_localIdx_0 + 1);
/* 124 */ range_inputMetrics_0.incRecordsRead(range_localIdx_0 + 1);
/* 125 */ return;
/* 126 */ }
/* 127 */
/* 128 */ }
// Batch fully emitted: advance the cursor, record metrics, honor task kill requests.
/* 129 */ range_nextIndex_0 = range_batchEnd_0;
/* 130 */ ((org.apache.spark.sql.execution.metric.SQLMetric) references[0] /* numOutputRows */).add(range_localEnd_0);
/* 131 */ range_inputMetrics_0.incRecordsRead(range_localEnd_0);
/* 132 */ range_taskContext_0.killTaskIfInterrupted();
/* 133 */ }
/* 134 */ }
/* 135 */
/* 136 */ }
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment