Skip to content

Instantly share code, notes, and snippets.

@silvasean
Created February 26, 2021 00:11
Show Gist options
  • Save silvasean/8abe63d70d24e29d6db9170ccc8d512b to your computer and use it in GitHub Desktop.
module {
// Weight matrix (tensor<768x768xf32>) for layer 0 of the intent-head MLP.
// NOTE(review): the constant payload is the opaque placeholder "0xDEADBEEF" —
// the real weight data appears to have been elided from this dump; confirm
// before treating these values as meaningful.
torch.global_slot "private" @intent_mlp.layer0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<768x768xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<768x768xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Bias vector (tensor<768xf32>) for layer 0 of the intent-head MLP.
// Payload is an opaque "0xDEADBEEF" placeholder (data elided from this dump).
torch.global_slot "private" @intent_mlp.layer0.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<768xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<768xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Weight matrix (tensor<64x768xf32>) for layer 2 of the intent-head MLP —
// projects 768 features down to 64 outputs (presumably 64 intent classes;
// confirm against the model definition). Payload is an opaque placeholder.
torch.global_slot "private" @intent_mlp.layer2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<64x768xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<64x768xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Bias vector (tensor<64xf32>) for layer 2 of the intent-head MLP.
// Payload is an opaque "0xDEADBEEF" placeholder (data elided from this dump).
torch.global_slot "private" @intent_mlp.layer2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<64xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<64xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Weight matrix (tensor<768x768xf32>) for layer 0 of the slot-tagging MLP.
// Payload is an opaque "0xDEADBEEF" placeholder (data elided from this dump).
torch.global_slot "private" @slot_mlp.layer0.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<768x768xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<768x768xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Bias vector (tensor<768xf32>) for layer 0 of the slot-tagging MLP.
// Payload is an opaque "0xDEADBEEF" placeholder (data elided from this dump).
torch.global_slot "private" @slot_mlp.layer0.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<768xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<768xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Weight matrix (tensor<55x768xf32>) for layer 2 of the slot-tagging MLP —
// projects 768 features down to 55 outputs (presumably 55 slot labels;
// confirm against the model definition). Payload is an opaque placeholder.
torch.global_slot "private" @slot_mlp.layer2.weight : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<55x768xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<55x768xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// Bias vector (tensor<55xf32>) for layer 2 of the slot-tagging MLP.
// Payload is an opaque "0xDEADBEEF" placeholder (data elided from this dump).
torch.global_slot "private" @slot_mlp.layer2.bias : !numpy.ndarray<*:!numpy.any_dtype> {
%cst = constant opaque<"", "0xDEADBEEF"> : tensor<55xf32>
%0 = numpy.create_array_from_tensor %cst : (tensor<55xf32>) -> !numpy.ndarray<*:!numpy.any_dtype>
torch.global_slot.init %0 : !numpy.ndarray<*:!numpy.any_dtype>
}
// forward: joint intent-classification / slot-tagging inference.
//   %arg0 — input activations (unranked ndarray; presumably encoder output of
//           shape (batch, seq, 768) given the 768-wide layer-0 weights — TODO
//           confirm against the caller).
// Returns a 2-tuple: (intent-head output %12, slot-head output %24).
func @forward(%arg0: !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.TupleType {
%bool_false = basicpy.bool_constant false
%cst = constant 1.000000e-01 : f64
%c0_i64 = constant 0 : i64
%c9223372036854775807_i64 = constant 9223372036854775807 : i64
%c1_i64 = constant 1 : i64
// Dropout with p=0.1, train=false — at inference this is an identity pass.
%0 = torch.kernel_call "aten::dropout" %arg0, %cst, %bool_false : (!numpy.ndarray<*:!numpy.any_dtype>, f64, !basicpy.BoolType) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "float", "bool"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
// --- Intent head ---
// Full-range slice along dim 0 (start=0, end=INT64_MAX, step=1): a no-op view.
%1 = torch.kernel_call "aten::slice" %0, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
// Select index 0 along dim 1 — picks the first sequence position (presumably
// a [CLS]-style pooled token; confirm against the model).
%2 = torch.kernel_call "aten::select" %1, %c1_i64, %c0_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
// Another full-range slice along dim 0 — also a no-op view.
%3 = torch.kernel_call "aten::slice" %2, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%4 = torch.global_slot.get @intent_mlp.layer0.bias : !numpy.ndarray<*:!numpy.any_dtype>
%5 = torch.global_slot.get @intent_mlp.layer0.weight : !numpy.ndarray<*:!numpy.any_dtype>
// Linear layer 0 (768 -> 768): addmm(bias, x, weight^T) with alpha=beta=1.
%6 = torch.kernel_call "aten::t" %5 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%7 = torch.kernel_call "aten::addmm" %4, %3, %6, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%8 = torch.kernel_call "aten::relu" %7 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%9 = torch.global_slot.get @intent_mlp.layer2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%10 = torch.global_slot.get @intent_mlp.layer2.weight : !numpy.ndarray<*:!numpy.any_dtype>
// Linear layer 2 (768 -> 64): intent-head output %12.
%11 = torch.kernel_call "aten::t" %10 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%12 = torch.kernel_call "aten::addmm" %9, %8, %11, %c1_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Tensor", "Scalar", "Scalar"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
// --- Slot head (operates on the un-pooled activations %0) ---
// Full-range slice along dim 0 — a no-op view of %0.
%13 = torch.kernel_call "aten::slice" %0, %c0_i64, %c0_i64, %c9223372036854775807_i64, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, i64, i64, i64, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "int", "int?", "int?", "int"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%14 = torch.global_slot.get @slot_mlp.layer0.bias : !numpy.ndarray<*:!numpy.any_dtype>
%15 = torch.global_slot.get @slot_mlp.layer0.weight : !numpy.ndarray<*:!numpy.any_dtype>
// Linear layer 0 (768 -> 768), expressed as matmul + in-place bias add.
// NOTE(review): unlike the intent head's fused addmm, this path uses
// matmul followed by a mutating aten::add_ — presumably because the slot
// input is not 2-D (addmm requires a matrix); confirm this asymmetry is
// intended by the frontend and not a lowering inconsistency.
%16 = torch.kernel_call "aten::t" %15 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%17 = torch.kernel_call "aten::matmul" %13, %16 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
// In-place (sigIsMutable = true) add of the bias with alpha=1.
%18 = torch.kernel_call "aten::add_" %17, %14, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = true, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%19 = torch.kernel_call "aten::relu" %18 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%20 = torch.global_slot.get @slot_mlp.layer2.bias : !numpy.ndarray<*:!numpy.any_dtype>
%21 = torch.global_slot.get @slot_mlp.layer2.weight : !numpy.ndarray<*:!numpy.any_dtype>
// Linear layer 2 (768 -> 55): slot-head output %24 (matmul + in-place bias add).
%22 = torch.kernel_call "aten::t" %21 : (!numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%23 = torch.kernel_call "aten::matmul" %19, %22 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor"], sigIsMutable = false, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
%24 = torch.kernel_call "aten::add_" %23, %20, %c1_i64 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>, i64) -> !numpy.ndarray<*:!numpy.any_dtype> {sigArgTypes = ["Tensor", "Tensor", "Scalar"], sigIsMutable = true, sigIsVararg = false, sigIsVarret = false, sigRetTypes = ["Tensor"]}
// Package (intent output, slot output) as a Python-style tuple.
%25 = basicpy.build_tuple %12, %24 : (!numpy.ndarray<*:!numpy.any_dtype>, !numpy.ndarray<*:!numpy.any_dtype>) -> !basicpy.TupleType
return %25 : !basicpy.TupleType
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment