Lei Zhang antiagainst

Knowledge

Endianness

  • Big-endian: the most significant byte is stored at the lowest memory address
  • Little-endian: the least significant byte is stored at the lowest memory address (see the C++ sketch below)
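
A minimal C++ sketch, my own addition rather than part of the original note, that checks which byte of a 32-bit integer ends up at the lowest address:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  std::uint32_t value = 0x01020304;
  std::uint8_t first = 0;
  std::memcpy(&first, &value, 1);  // copy the byte stored at the lowest address
  if (first == 0x04) {
    std::puts("little-endian: the least significant byte (0x04) comes first");
  } else if (first == 0x01) {
    std::puts("big-endian: the most significant byte (0x01) comes first");
  }
  return 0;
}
```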

safety and liveness

Any specification can be expressed as the conjunction of a safety property and a liveness property.

  • safety: something bad will never happen (e.g., two threads are never in the critical section at the same time)
  • liveness: something good will eventually happen (e.g., every request is eventually answered)
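
A hedged temporal-logic sketch of the two shapes (my own addition; Bad and Good stand for arbitrary predicates over states):

$$\text{safety:}\quad \Box\,\lnot\mathit{Bad} \qquad\qquad \text{liveness:}\quad \Diamond\,\mathit{Good}$$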
antiagainst / DevToolTips.md
Last active October 7, 2017 10:54
Tips for Development Tools

git

How to show the tracking relationship between local and remote branches?

git branch -vv

How to change the remote tracking branch?

git branch --set-upstream-to origin/somebranch

// *** IR Dump After Canonicalizer ***
module {
func @while() attributes {iree.module.export} {
%cst = constant dense<1> : tensor<i32>
%cst_0 = constant dense<3> : tensor<i32>
%cst_1 = constant dense<4> : tensor<i32>
%0 = iree.do_not_optimize(%cst) : tensor<i32>
%1 = iree.do_not_optimize(%cst_0) : tensor<i32>
// *** IR Dump After mlir::mhlo::(anonymous namespace)::LegalizeControlFlowPass ***
func @pad_test() attributes {iree.module.export} {
%0 = iree.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32>
%1 = iree.unfoldable_constant dense<0> : tensor<i32>
%2 = "mhlo.pad"(%0, %1) {edge_padding_high = dense<[1, 5]> : tensor<2xi64>, edge_padding_low = dense<[0, 1]> : tensor<2xi64>, interior_padding = dense<0> : tensor<2xi64>} : (tensor<2x3xi32>, tensor<i32>) -> tensor<3x9xi32>
check.expect_eq_const(%2, dense<[[0, 1, 2, 3, 0, 0, 0, 0, 0], [0, 4, 5, 6, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]> : tensor<3x9xi32>) : tensor<3x9xi32>
return
}
// *** IR Dump After mlir::iree_compiler::IREE::Flow::(anonymous namespace)::HLOToHLOPreprocessing ***
// *** IR Dump After mlir::iree_compiler::IREE::SIP::MaterializeReflectionAttrsPass ***
func @conv(%arg0: tensor<1x225x225x3xf32>, %arg1: tensor<3x3x3x32xf32>) -> tensor<1x112x112x32xf32> attributes {iree.module.export, iree.reflection = {f = "I30!B13!d1d225d225d3B10!d3d3d3d32R18!B14!d1d112d112d32", fv = "1"}} {
%0 = "mhlo.convolution"(%arg0, %arg1) {batch_group_count = 1 : i64, dimension_numbers = {input_batch_dimension = 0 : i64, input_feature_dimension = 3 : i64, input_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>, kernel_input_feature_dimension = 2 : i64, kernel_output_feature_dimension = 3 : i64, kernel_spatial_dimensions = dense<[0, 1]> : tensor<2xi64>, output_batch_dimension = 0 : i64, output_feature_dimension = 3 : i64, output_spatial_dimensions = dense<[1, 2]> : tensor<2xi64>}, feature_group_count = 1 : i64, padding = dense<0> : tensor<2x2xi64>, rhs_dilation = dense<1> : tensor<2xi64>, window_strides = dense<2> : tensor<2xi64>} : (tensor<1x225x225x3xf32>, tensor<3x3x3x32xf32>) -> tensor<1x112x
// *** IR Dump After CSE ***
func @call(%arg0: tensor<1x224x224x3xf32> {tf._user_specified_name = "x"}) -> tensor<1x1000xf32> attributes {iree.module.export, iree.reflection = {abi = "sip", abiv = 1 : i32, f = "I17!B13!d1d224d224d3R11!B8!d1d1000", fv = "1", sip = "I8!S5!k0_0R3!_0"}, tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf.shape<1x224x224x3>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #tf.shape<>, #t
antiagainst / vulkan-1.1-compute.md
Last active June 26, 2021 21:17
vulkan-1.1-compute

Vulkan 1.1 compute API

This Gist contains a PDF file derived from the Khronos Group's Vulkan 1.1 API reference card. It removes the graphics-specific APIs to show the compute subset.
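
As a hedged illustration of what the compute subset looks like in use (my own sketch, not derived from the PDF), the C++ snippet below creates a Vulkan 1.1 instance and searches for a queue family with compute support:

```cpp
#include <vulkan/vulkan.h>

#include <cstdio>
#include <vector>

int main() {
  // A Vulkan 1.1 instance; no graphics or window-system extensions are needed
  // for pure compute work.
  VkApplicationInfo appInfo = {};
  appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
  appInfo.apiVersion = VK_API_VERSION_1_1;

  VkInstanceCreateInfo instanceInfo = {};
  instanceInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
  instanceInfo.pApplicationInfo = &appInfo;

  VkInstance instance;
  if (vkCreateInstance(&instanceInfo, nullptr, &instance) != VK_SUCCESS) return 1;

  // Enumerate physical devices and inspect the first one's queue families.
  uint32_t deviceCount = 0;
  vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
  if (deviceCount == 0) return 1;
  std::vector<VkPhysicalDevice> devices(deviceCount);
  vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());

  uint32_t familyCount = 0;
  vkGetPhysicalDeviceQueueFamilyProperties(devices[0], &familyCount, nullptr);
  std::vector<VkQueueFamilyProperties> families(familyCount);
  vkGetPhysicalDeviceQueueFamilyProperties(devices[0], &familyCount, families.data());

  // A compute-only workload just needs a queue family advertising VK_QUEUE_COMPUTE_BIT.
  for (uint32_t i = 0; i < familyCount; ++i) {
    if (families[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
      std::printf("queue family %u supports compute\n", i);
    }
  }

  vkDestroyInstance(instance, nullptr);
  return 0;
}
```

From there, a compute-only application would create a logical device and queue, build a compute pipeline from a SPIR-V shader, and record a vkCmdDispatch into a command buffer.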

// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass //----- //
builtin.module {
flow.variable @"__iree_flow_bert/embeddings/FakeLayerNorm/beta" opaque<"_", "0xDEADBEEF"> : tensor<512xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow_bert/embeddings/FakeLayerNorm/gamma" opaque<"_", "0xDEADBEEF"> : tensor<512xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow_bert/embeddings/embedding_transformation/bias" opaque<"_", "0xDEADBEEF"> : tensor<512xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow_bert/embeddings/embedding_transformation/kernel" opaque<"_", "0xDEADBEEF"> : tensor<384x512xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow_bert/embeddings/position_embeddings" opaque<"_", "0xDEADBEEF"> : tensor<512x512xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow_bert/embeddings/token_type_embeddings" opaque<"_", "0xDEADBEEF"> : tensor<2x512xf32> attributes {sym_vis
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass //----- //
builtin.module {
flow.variable @"__iree_flow___sm_node163__m.layer-1.kernel" opaque<"_", "0xDEADBEEF"> : tensor<3x3x3x32xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow___sm_node169__m.layer-2.gamma" opaque<"_", "0xDEADBEEF"> : tensor<32xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow___sm_node170__m.layer-2.beta" opaque<"_", "0xDEADBEEF"> : tensor<32xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow___sm_node171__m.layer-2.moving_mean" opaque<"_", "0xDEADBEEF"> : tensor<32xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow___sm_node172__m.layer-2.moving_variance" opaque<"_", "0xDEADBEEF"> : tensor<32xf32> attributes {sym_visibility = "private"}
flow.variable @"__iree_flow___sm_node181__m.layer-4.depthwise_kernel" opaque<"_", "0xDEADBEEF"> : tensor<3x3x32x1xf32> attributes {sym_visibility = "private"}
flow.variable