Skip to content

Instantly share code, notes, and snippets.

@masahi
Created March 15, 2021 20:40
Show Gist options
  • Save masahi/1142cfae30344fcc3fa6a6182d0b8c40 to your computer and use it in GitHub Desktop.
// Standard optional-value ADT from the Relay prelude: Some(A) or None.
type Option[A] {
Some(A),
None,
}
// Boxed statically-shaped (2, 4) float32 tensor used by the static
// tensor-array machinery; the nil constructor marks an empty/uninitialized slot.
type static_tensor_float32_2_4_t {
tensor_nil_float32_2_4,
tensor_constructor_float32_2_4(Tensor[(2, 4), float32]),
}
// Boxed float32 tensor with a dynamic leading axis over a static (2, 4) element
// shape, i.e. (?, 2, 4) — the result type of stacking (2, 4) tensors.
type static_tensor_float32_any_2_4_t {
tensor_nil_float32_any_2_4,
tensor_constructor_float32_any_2_4(Tensor[(?, 2, 4), float32]),
}
// Rank-erased int64 tensor ADT: one constructor per rank 0-6 with every
// dimension dynamic, plus a nil constructor. Presumably emitted by the Relay
// prelude's tensor ADT registration; not referenced in the visible module.
type tensor_int64_t {
tensor_nil_int64,
tensor0_int64(int64),
tensor1_int64(Tensor[(?), int64]),
tensor2_int64(Tensor[(?, ?), int64]),
tensor3_int64(Tensor[(?, ?, ?), int64]),
tensor4_int64(Tensor[(?, ?, ?, ?), int64]),
tensor5_int64(Tensor[(?, ?, ?, ?, ?), int64]),
tensor6_int64(Tensor[(?, ?, ?, ?, ?, ?), int64]),
}
// Boxed statically-shaped (4,) float32 tensor; nil marks an empty slot.
// Not referenced in the visible module.
type static_tensor_float32_4_t {
tensor_nil_float32_4,
tensor_constructor_float32_4(Tensor[(4), float32]),
}
// Standard cons-list ADT from the Relay prelude; used here as the tensor-array
// container and as the while-loop's output accumulator.
type List[A] {
Cons(A, List[A]),
Nil,
}
// Rank-erased uint16 tensor ADT (ranks 0-6, all dims dynamic, plus nil).
// Presumably prelude-generated; not referenced in the visible module.
type tensor_uint16_t {
tensor_nil_uint16,
tensor0_uint16(uint16),
tensor1_uint16(Tensor[(?), uint16]),
tensor2_uint16(Tensor[(?, ?), uint16]),
tensor3_uint16(Tensor[(?, ?, ?), uint16]),
tensor4_uint16(Tensor[(?, ?, ?, ?), uint16]),
tensor5_uint16(Tensor[(?, ?, ?, ?, ?), uint16]),
tensor6_uint16(Tensor[(?, ?, ?, ?, ?, ?), uint16]),
}
// Boxed float32 tensor of shape (?, 4): dynamic leading axis over a static
// (4,) element shape. Not referenced in the visible module.
type static_tensor_float32_any_4_t {
tensor_nil_float32_any_4,
tensor_constructor_float32_any_4(Tensor[(?, 4), float32]),
}
// Rank-erased uint8 tensor ADT (ranks 0-6, all dims dynamic, plus nil).
// Presumably prelude-generated; not referenced in the visible module.
type tensor_uint8_t {
tensor_nil_uint8,
tensor0_uint8(uint8),
tensor1_uint8(Tensor[(?), uint8]),
tensor2_uint8(Tensor[(?, ?), uint8]),
tensor3_uint8(Tensor[(?, ?, ?), uint8]),
tensor4_uint8(Tensor[(?, ?, ?, ?), uint8]),
tensor5_uint8(Tensor[(?, ?, ?, ?, ?), uint8]),
tensor6_uint8(Tensor[(?, ?, ?, ?, ?, ?), uint8]),
}
// Rank-erased float64 tensor ADT (ranks 0-6, all dims dynamic, plus nil).
// Presumably prelude-generated; not referenced in the visible module.
type tensor_float64_t {
tensor_nil_float64,
tensor0_float64(float64),
tensor1_float64(Tensor[(?), float64]),
tensor2_float64(Tensor[(?, ?), float64]),
tensor3_float64(Tensor[(?, ?, ?), float64]),
tensor4_float64(Tensor[(?, ?, ?, ?), float64]),
tensor5_float64(Tensor[(?, ?, ?, ?, ?), float64]),
tensor6_float64(Tensor[(?, ?, ?, ?, ?, ?), float64]),
}
// Rank-erased int16 tensor ADT (ranks 0-6, all dims dynamic, plus nil).
// Presumably prelude-generated; not referenced in the visible module.
type tensor_int16_t {
tensor_nil_int16,
tensor0_int16(int16),
tensor1_int16(Tensor[(?), int16]),
tensor2_int16(Tensor[(?, ?), int16]),
tensor3_int16(Tensor[(?, ?, ?), int16]),
tensor4_int16(Tensor[(?, ?, ?, ?), int16]),
tensor5_int16(Tensor[(?, ?, ?, ?, ?), int16]),
tensor6_int16(Tensor[(?, ?, ?, ?, ?, ?), int16]),
}
// Rose tree ADT from the Relay prelude: a value plus a list of subtrees.
// Not referenced in the visible module.
type Tree[A] {
Rose(A, List[Tree[A]]),
}
// Rank-erased int32 tensor ADT (ranks 0-6, all dims dynamic, plus nil).
// Presumably prelude-generated; not referenced in the visible module.
type tensor_int32_t {
tensor_nil_int32,
tensor0_int32(int32),
tensor1_int32(Tensor[(?), int32]),
tensor2_int32(Tensor[(?, ?), int32]),
tensor3_int32(Tensor[(?, ?, ?), int32]),
tensor4_int32(Tensor[(?, ?, ?, ?), int32]),
tensor5_int32(Tensor[(?, ?, ?, ?, ?), int32]),
tensor6_int32(Tensor[(?, ?, ?, ?, ?, ?), int32]),
}
// Rank-erased float32 tensor ADT (ranks 0-6, all dims dynamic, plus nil).
// Presumably prelude-generated; not referenced in the visible module (the
// model uses the statically-shaped static_tensor_float32_* ADTs instead).
type tensor_float32_t {
tensor_nil_float32,
tensor0_float32(float32),
tensor1_float32(Tensor[(?), float32]),
tensor2_float32(Tensor[(?, ?), float32]),
tensor3_float32(Tensor[(?, ?, ?), float32]),
tensor4_float32(Tensor[(?, ?, ?, ?), float32]),
tensor5_float32(Tensor[(?, ?, ?, ?, ?), float32]),
tensor6_float32(Tensor[(?, ?, ?, ?, ?, ?), float32]),
}
// Rank-erased float16 tensor ADT (ranks 0-6, all dims dynamic, plus nil).
// Presumably prelude-generated; not referenced in the visible module.
type tensor_float16_t {
tensor_nil_float16,
tensor0_float16(float16),
tensor1_float16(Tensor[(?), float16]),
tensor2_float16(Tensor[(?, ?), float16]),
tensor3_float16(Tensor[(?, ?, ?), float16]),
tensor4_float16(Tensor[(?, ?, ?, ?), float16]),
tensor5_float16(Tensor[(?, ?, ?, ?, ?), float16]),
tensor6_float16(Tensor[(?, ?, ?, ?, ?, ?), float16]),
}
// Rank-erased int8 tensor ADT (ranks 0-6, all dims dynamic, plus nil).
// Presumably prelude-generated; not referenced in the visible module.
type tensor_int8_t {
tensor_nil_int8,
tensor0_int8(int8),
tensor1_int8(Tensor[(?), int8]),
tensor2_int8(Tensor[(?, ?), int8]),
tensor3_int8(Tensor[(?, ?, ?), int8]),
tensor4_int8(Tensor[(?, ?, ?, ?), int8]),
tensor5_int8(Tensor[(?, ?, ?, ?, ?), int8]),
tensor6_int8(Tensor[(?, ?, ?, ?, ?, ?), int8]),
}
// Boxed float32 tensor of shape (?, ?, 2, 4): two dynamic leading axes over a
// static (2, 4) element shape. Not referenced in the visible module.
type static_tensor_float32_any_any_2_4_t {
tensor_nil_float32_any_any_2_4,
tensor_constructor_float32_any_any_2_4(Tensor[(?, ?, 2, 4), float32]),
}
// Head of a list. `match?` is a partial match: there is deliberately no Nil
// arm, so calling @hd on an empty list is a runtime match failure.
def @hd[A](%xs: List[A]) -> A {
match? (%xs) {
Cons(%x: A, _) => {
%x
},
}
}
// Model entry point: generated IR for a recurrent cell iterated over the 5
// time steps of %input (shape (5, 2, 3): seq_len=5, batch=2, feature=3), with
// hidden size 4. NOTE(review): parameter names (%cell.layernorm_i.* etc.) and
// the gate arithmetic below look like a PyTorch LayerNorm-LSTM cell imported
// through a frontend converter — confirm against the originating model.
// Returns (stacked per-step hidden states as a (?, 2, 4) tensor, final (h, c)).
def @main(%states: (Tensor[(2, 4), float32], Tensor[(2, 4), float32]), %input: Tensor[(5, 2, 3), float32], %cell.weight_ih: Tensor[(16, 3), float32], %cell.layernorm_i.weight: Tensor[(16), float32], %cell.layernorm_i.bias: Tensor[(16), float32], %cell.weight_hh: Tensor[(16, 4), float32], %cell.layernorm_h.weight: Tensor[(16), float32], %cell.layernorm_h.bias: Tensor[(16), float32], %cell.layernorm_c.weight: Tensor[(4), float32], %cell.layernorm_c.bias: Tensor[(4), float32]) {
// Empty accumulator for the per-step outputs.
%0 = Nil;
%36 = (
// Loop state: step counter, accumulated outputs, (h, c) tuple, and the
// loop-invariant input sequence (threaded through unchanged).
let %while_loop = fn (%i.1: int32, %outputs.6: List[meta[IncompleteType][0]], %state.6: (Tensor[(2, 4), float32], Tensor[(2, 4), float32]), %input.1: Tensor[(5, 2, 3), float32]) {
// Continue while i < 5 (the sequence length).
%1 = less(%i.1, 5);
if (%1) {
%2 = add(%i.1, 1);
// x_t: time step i of the input, shape (2, 3).
%3 = take(%input.1, %i.1, axis=0, mode="wrap");
// NOTE(review): %4/%5 apply the same transpose twice — a net no-op left in by
// the converter (likewise %9/%10 below); harmless, and foldable by the compiler.
%4 = transpose(%cell.weight_ih, axes=[1, 0]);
%5 = transpose(%4, axes=[1, 0]);
// Input projection with its layer norm: LN_i(x_t . W_ih^T), shape (2, 16).
%6 = nn.dense(%3, %5, units=None);
%7 = nn.layer_norm(%6, %cell.layernorm_i.weight, %cell.layernorm_i.bias);
// h_{t-1} is element 0 of the state tuple.
%8 = %state.6.0;
%9 = transpose(%cell.weight_hh, axes=[1, 0]);
%10 = transpose(%9, axes=[1, 0]);
// Hidden projection with its layer norm: LN_h(h_{t-1} . W_hh^T), shape (2, 16).
%11 = nn.dense(%8, %10, units=None);
%12 = nn.layer_norm(%11, %cell.layernorm_h.weight, %cell.layernorm_h.bias);
// Pre-activation gates, shape (2, 16): four gates of width 4. Gate roles are
// inferred from how each slice is used below — [0,4) input, [4,8) forget,
// [8,12) candidate, [12,16) output (PyTorch's i,f,g,o order) — confirm.
%13 = add(%7, %12);
// o_t = sigmoid(gates[:, 12:16])
%14 = strided_slice(%13, begin=[0, 12], end=[2, 16], strides=[1, 1]);
%15 = sigmoid(%14);
// f_t = sigmoid(gates[:, 4:8])
%16 = strided_slice(%13, begin=[0, 4], end=[2, 8], strides=[1, 1]);
%17 = sigmoid(%16);
// c_{t-1} is element 1 of the state tuple; f_t * c_{t-1}.
%18 = %state.6.1;
%19 = multiply(%17, %18);
// i_t = sigmoid(gates[:, 0:4])
%20 = strided_slice(%13, begin=[0, 0], end=[2, 4], strides=[1, 1]);
%21 = sigmoid(%20);
// g_t = tanh(gates[:, 8:12]); i_t * g_t.
%22 = strided_slice(%13, begin=[0, 8], end=[2, 12], strides=[1, 1]);
%23 = tanh(%22);
%24 = multiply(%21, %23);
// c_t = LN_c(f_t * c_{t-1} + i_t * g_t)
%25 = add(%19, %24);
%26 = nn.layer_norm(%25, %cell.layernorm_c.weight, %cell.layernorm_c.bias);
// h_t = o_t * tanh(c_t)
%27 = tanh(%26);
%28 = multiply(%15, %27);
// Cell result as (output, (h_t, c_t)).
%29 = (%28, %26);
%30 = (%28, %29);
%31 = %30.0;
// Append h_t to the outputs: build a singleton list and concatenate on the right.
%32 = Nil;
%33 = Cons(%31, %32);
%34 = @concat(%outputs.6, %33);
%35 = %30.1;
// Recurse into the next time step.
%while_loop(%2, %34, %35, %input.1)
} else {
// Loop exit: return the final loop-carried values unchanged.
(%i.1, %outputs.6, %state.6, %input.1)
}
};
%while_loop
);
// Run the loop from step 0 with an empty output list and the initial state.
%37 = %36(0, %0, %states, %input);
// %37.1: List of per-step (2, 4) hidden states.
%38 = %37.1;
// Box each step into the static tensor ADT, stack along a new leading axis,
// then unbox to a plain (?, 2, 4) tensor.
%39 = @map(tensor_constructor_float32_2_4(Tensor[(2, 4), float32]), %38);
%40 = @tensor_array_stack_float32_2_4(%39);
%41 = @tensor_get_data_float32_any_2_4(%40);
// %37.2: final (h, c) state tuple.
%42 = %37.2;
(%41, %42)
}
// Stack a tensor array (list of boxed (2, 4) tensors) into one boxed
// (?, 2, 4) tensor: expand each element with a new leading axis, then fold
// the tail into the head with concatenate along that axis.
// Inherits @hd/@tl's partial matches: fails at runtime on an empty list.
def @tensor_array_stack_float32_2_4(%tensor_array: List[static_tensor_float32_2_4_t[]]) -> static_tensor_float32_any_2_4_t[] {
%43 = @map(@tensor_expand_dims_float32_2_4, %tensor_array);
%44 = @hd(%43);
%45 = @tl(%43);
@foldl(@tensor_concatenate_float32_any_2_4, %44, %45)
}
// List append: rebuild %xs1 via foldr with Cons, using %ys as the initial
// accumulator, so the result is %xs1 followed by %ys.
def @concat[A](%xs1: List[A], %ys: List[A]) -> List[A] {
@foldr(Cons, %ys, %xs1) /* /home/masa/projects/dev/tvm/python/tvm/relay/std/prelude.rly */ /* ty=List[A] */
}
// Apply %f to every element of a list, preserving order (standard prelude map,
// implemented by structural recursion).
def @map[A, B](%f: fn (A) -> B, %xs2: List[A]) -> List[B] {
match (%xs2) {
Cons(%x1: A, %rest: List[A]) => {
%46 = %f(%x1) /* /home/masa/projects/dev/tvm/python/tvm/relay/std/prelude.rly */ /* ty=B */;
%47 = @map(%f, %rest) /* /home/masa/projects/dev/tvm/python/tvm/relay/std/prelude.rly */ /* ty=List[B] */;
Cons(%46, %47) /* /home/masa/projects/dev/tvm/python/tvm/relay/std/prelude.rly */ /* ty=List[B] */
},
Nil => {
Nil /* ty=List[B] */
},
}
}
// Unbox a (2, 4) tensor, add a leading axis of extent 1, and rebox it as a
// (?, 2, 4) tensor. Partial match: a nil-constructed input fails at runtime.
def @tensor_expand_dims_float32_2_4(%x2: static_tensor_float32_2_4_t[]) -> static_tensor_float32_any_2_4_t[] {
match? (%x2) {
tensor_constructor_float32_2_4(%t) => {
%48 = expand_dims(%t, axis=0);
tensor_constructor_float32_any_2_4(%48)
},
}
}
// Left fold over a list: threads the accumulator through %f1 from the head
// toward the tail; returns %acc unchanged for Nil.
def @foldl[A, B](%f1: fn (A, B) -> A, %acc: A, %xs3: List[B]) -> A {
match (%xs3) {
Cons(%x3: B, %rest1: List[B]) => {
%49 = %f1(%acc, %x3) /* /home/masa/projects/dev/tvm/python/tvm/relay/std/prelude.rly */ /* ty=A */;
@foldl(%f1, %49, %rest1) /* /home/masa/projects/dev/tvm/python/tvm/relay/std/prelude.rly */ /* ty=A */
},
Nil => {
%acc
},
}
}
// Tail of a list. Partial match (no Nil arm): fails at runtime on an empty list.
def @tl[A](%xs4: List[A]) -> List[A] {
match? (%xs4) {
Cons(_, %rest2: List[A]) => {
%rest2
},
}
}
// Unbox the underlying (?, 2, 4) tensor from the ADT wrapper. Partial match:
// a nil-constructed input fails at runtime.
def @tensor_get_data_float32_any_2_4(%tensor: static_tensor_float32_any_2_4_t[]) -> Tensor[(?, 2, 4), float32] {
match? (%tensor) {
tensor_constructor_float32_any_2_4(%t1) => {
%t1
},
}
}
// Concatenate two boxed (?, 2, 4) tensors along the (default) leading axis and
// rebox the result. Both matches are partial: nil inputs fail at runtime.
def @tensor_concatenate_float32_any_2_4(%x4: static_tensor_float32_any_2_4_t[], %y: static_tensor_float32_any_2_4_t[]) -> static_tensor_float32_any_2_4_t[] {
match? (%x4) {
tensor_constructor_float32_any_2_4(%t11) => {
match? (%y) {
tensor_constructor_float32_any_2_4(%t2) => {
%50 = (%t11, %t2);
%51 = concatenate(%50);
tensor_constructor_float32_any_2_4(%51)
},
}
},
}
}
// Right fold over a list: recurses to the tail first, then applies %f2 from
// the last element back toward the head; returns %acc1 unchanged for Nil.
def @foldr[A, B](%f2: fn (A, B) -> B, %acc1: B, %xs5: List[A]) -> B {
match (%xs5) {
Cons(%x5: A, %rest3: List[A]) => {
%52 = @foldr(%f2, %acc1, %rest3) /* /home/masa/projects/dev/tvm/python/tvm/relay/std/prelude.rly */ /* ty=B */;
%f2(%x5, %52) /* /home/masa/projects/dev/tvm/python/tvm/relay/std/prelude.rly */ /* ty=B */
},
Nil => {
%acc1
},
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment