// Gist by @tmandry — created January 4, 2024 23:43.
// cargo expand output for rust-lang/rust#119593.
#![feature(prelude_import)]
#[prelude_import]
use std::prelude::rust_2021::*;
#[macro_use]
extern crate std;
mod args {
use crate::{
error::ParseWarning, fxt_builder::FxtBuilder, session::ResolveCtx,
string::{StringRef, STRING_REF_INLINE_BIT},
trace_header, ParseError, ParseResult,
};
use flyweights::FlyStr;
use nom::number::complete::{le_f64, le_i64, le_u64};
/// A fully resolved trace-record argument: its name and any string value
/// have been interned via `ResolveCtx` (see `Arg::resolve`).
pub struct Arg {
/// Argument name, resolved to an interned string.
pub name: FlyStr,
/// Resolved argument value.
pub value: ArgValue,
}
#[automatically_derived]
impl ::core::clone::Clone for Arg {
    /// Field-by-field deep copy (both fields own their data).
    #[inline]
    fn clone(&self) -> Arg {
        let name = self.name.clone();
        let value = self.value.clone();
        Arg { name, value }
    }
}
#[automatically_derived]
impl ::core::fmt::Debug for Arg {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field2_finish(
f,
"Arg",
"name",
&self.name,
"value",
&&self.value,
)
}
}
// Compiler marker trait emitted by `derive(PartialEq)`; enables structural
// matching on `Arg`. No behavior of its own.
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for Arg {}
#[automatically_derived]
impl ::core::cmp::PartialEq for Arg {
    /// Equal iff both name and value compare equal; short-circuits on name.
    #[inline]
    fn eq(&self, other: &Arg) -> bool {
        if self.name != other.name {
            return false;
        }
        self.value == other.value
    }
}
impl Arg {
    /// Resolves a batch of raw arguments, silently dropping those whose
    /// value type is unknown (a warning is recorded per dropped arg).
    pub(crate) fn resolve_n(
        ctx: &mut ResolveCtx,
        raw: Vec<RawArg<'_>>,
    ) -> Vec<Self> {
        let mut resolved = Vec::with_capacity(raw.len());
        for arg in raw {
            if let Some(arg) = Self::resolve(ctx, arg) {
                resolved.push(arg);
            }
        }
        resolved
    }
    /// Resolves a single raw argument. Returns `None` (after recording a
    /// `SkippingArgWithUnknownType` warning) when the value type is unknown.
    fn resolve(ctx: &mut ResolveCtx, raw: RawArg<'_>) -> Option<Self> {
        let name = ctx.resolve_str(raw.name);
        match ArgValue::resolve(ctx, raw.value) {
            Some(value) => Some(Self { name, value }),
            None => {
                ctx.add_warning(ParseWarning::SkippingArgWithUnknownType {
                    name,
                });
                None
            }
        }
    }
}
/// An argument as parsed straight from the FXT byte stream: name and string
/// values may still be stream references into `'a` rather than owned strings.
pub struct RawArg<'a> {
/// Argument name (inline string or string-table index).
pub name: StringRef<'a>,
/// Unresolved argument value; may borrow from the input buffer.
pub value: RawArgValue<'a>,
}
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawArg<'a> {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field2_finish(
f,
"RawArg",
"name",
&self.name,
"value",
&&self.value,
)
}
}
// Compiler marker trait emitted by `derive(PartialEq)` for `RawArg`.
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawArg<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawArg<'a> {
    /// Equal iff name and value both match; name checked first.
    #[inline]
    fn eq(&self, other: &RawArg<'a>) -> bool {
        if self.name != other.name {
            return false;
        }
        self.value == other.value
    }
}
#[automatically_derived]
impl<'a> ::core::clone::Clone for RawArg<'a> {
    /// Clones both fields; borrowed string data is copied by reference.
    #[inline]
    fn clone(&self) -> RawArg<'a> {
        let name = self.name.clone();
        let value = self.value.clone();
        RawArg { name, value }
    }
}
impl<'a> RawArg<'a> {
// Parses exactly `count` consecutive arguments from `buf`.
pub(crate) fn parse_n(count: u8, buf: &'a [u8]) -> ParseResult<'a, Vec<Self>> {
nom::multi::count(Self::parse, count as usize)(buf)
}
// Parses one argument record: a 64-bit base header, then the (optionally
// inline) name, then a value whose encoding depends on the header's type
// nibble. Fails with `InvalidSize` if the record's payload is not fully
// consumed by the value.
pub(crate) fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
use nom::combinator::map;
let (buf, base_header) = BaseArgHeader::parse(buf)?;
// `payload` is this record's bytes (size from the header); `rem` is the
// rest of the stream after this record.
let (rem, payload) = base_header.take_payload(buf)?;
let (payload, name) = StringRef::parse(base_header.name_ref(), payload)?;
let arg_ty = base_header.raw_type();
// Each arm yields (leftover-bytes, value); small scalar types live in
// the header word itself, 64-bit types follow as little-endian words.
let (empty, value) = match arg_ty {
NULL_ARG_TYPE => Ok((payload, RawArgValue::Null)),
BOOL_ARG_TYPE => {
// Re-interpret the same header bits with the bool layout.
let header = BoolHeader::new(base_header.0)
.map_err(nom::Err::Failure)?;
Ok((payload, RawArgValue::Boolean(header.value() != 0)))
}
I32_ARG_TYPE => {
let header = I32Header::new(base_header.0)
.map_err(nom::Err::Failure)?;
Ok((payload, RawArgValue::Signed32(header.value())))
}
U32_ARG_TYPE => {
let header = U32Header::new(base_header.0)
.map_err(nom::Err::Failure)?;
Ok((payload, RawArgValue::Unsigned32(header.value())))
}
I64_ARG_TYPE => map(le_i64, |i| RawArgValue::Signed64(i))(payload),
U64_ARG_TYPE => map(le_u64, |u| RawArgValue::Unsigned64(u))(payload),
F64_ARG_TYPE => map(le_f64, |f| RawArgValue::Double(f))(payload),
STR_ARG_TYPE => {
let header = StringHeader::new(base_header.0)
.map_err(nom::Err::Failure)?;
map(
move |b| StringRef::parse(header.value_ref(), b),
|s| RawArgValue::String(s),
)(payload)
}
PTR_ARG_TYPE => map(le_u64, |p| RawArgValue::Pointer(p))(payload),
KOBJ_ARG_TYPE => map(le_u64, |k| RawArgValue::KernelObj(k))(payload),
unknown => {
// Unknown type: keep the raw bytes so the record can be
// round-tripped; report zero leftover so the size check passes.
Ok((
&[][..],
RawArgValue::Unknown {
raw_type: unknown,
bytes: payload,
},
))
}
}?;
// The value must consume the record payload exactly.
if empty.is_empty() {
Ok((rem, Self { name, value }))
} else {
Err(nom::Err::Failure(ParseError::InvalidSize))
}
}
// Serializes this argument back to FXT bytes. Inline names/values are
// appended as padded atoms after the header word; indexed refs live in
// the header only. Errors if the argument has no name.
pub(crate) fn serialize(&self) -> Result<Vec<u8>, String> {
// Encode the name reference: string-table index, or inline length with
// the inline bit set.
let arg_name_ref = match self.name {
StringRef::Index(id) => id.into(),
StringRef::Inline(name) => name.len() as u16 | STRING_REF_INLINE_BIT,
StringRef::Empty => {
return Err("Argument is missing a name.".to_string());
}
};
match &self.value {
RawArgValue::Null => {
let mut header = NullHeader::empty();
header.set_name_ref(arg_name_ref);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.build())
}
RawArgValue::Boolean(val) => {
let mut header = BoolHeader::empty();
header.set_name_ref(arg_name_ref);
header.set_value(*val as u8);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.build())
}
RawArgValue::Signed32(val) => {
// 32-bit values are packed into the header word itself.
let mut header = I32Header::empty();
header.set_name_ref(arg_name_ref);
header.set_value(*val);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.build())
}
RawArgValue::Unsigned32(val) => {
let mut header = U32Header::empty();
header.set_name_ref(arg_name_ref);
header.set_value(*val);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.build())
}
RawArgValue::Signed64(val) => {
// 64-bit values follow the header as a little-endian atom.
let mut header = I64Header::empty();
header.set_name_ref(arg_name_ref);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.atom(val.to_le_bytes()).build())
}
RawArgValue::Unsigned64(val) => {
let mut header = U64Header::empty();
header.set_name_ref(arg_name_ref);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.atom(val.to_le_bytes()).build())
}
RawArgValue::Double(val) => {
let mut header = F64Header::empty();
header.set_name_ref(arg_name_ref);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.atom(val.to_le_bytes()).build())
}
RawArgValue::String(str_val) => {
let mut header = StringHeader::empty();
header.set_name_ref(arg_name_ref);
// Value ref encoded like the name ref; Empty maps to 0.
header
.set_value_ref(
match str_val {
StringRef::Index(id) => (*id).into(),
StringRef::Inline(val) => {
val.len() as u16 | STRING_REF_INLINE_BIT
}
StringRef::Empty => 0u16,
},
);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
if let StringRef::Inline(value_str) = str_val {
builder = builder.atom(value_str);
}
Ok(builder.build())
}
RawArgValue::Pointer(val) => {
let mut header = PtrHeader::empty();
header.set_name_ref(arg_name_ref);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.atom(val.to_le_bytes()).build())
}
RawArgValue::KernelObj(val) => {
let mut header = KobjHeader::empty();
header.set_name_ref(arg_name_ref);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.atom(val.to_le_bytes()).build())
}
RawArgValue::Unknown { raw_type, bytes } => {
// NOTE(review): unlike every other variant, this header never
// receives `set_name_ref(arg_name_ref)` even though the inline
// name atom is still appended — confirm this asymmetry is
// intentional for unknown-typed round-trips.
let mut header = BaseArgHeader::empty();
header.set_raw_type(*raw_type);
let mut builder = FxtBuilder::new(header);
if let StringRef::Inline(name_str) = self.name {
builder = builder.atom(name_str);
}
Ok(builder.atom(bytes).build())
}
}
}
}
/// A resolved argument value; string data is owned (interned `FlyStr`),
/// so this type has no borrow of the input stream. Unknown-typed values
/// are dropped during resolution rather than represented here.
pub enum ArgValue {
Null,
Boolean(bool),
Signed32(i32),
Unsigned32(u32),
Signed64(i64),
Unsigned64(u64),
Double(f64),
String(FlyStr),
Pointer(u64),
KernelObj(u64),
}
#[automatically_derived]
impl ::core::clone::Clone for ArgValue {
#[inline]
fn clone(&self) -> ArgValue {
match self {
ArgValue::Null => ArgValue::Null,
ArgValue::Boolean(__self_0) => {
ArgValue::Boolean(::core::clone::Clone::clone(__self_0))
}
ArgValue::Signed32(__self_0) => {
ArgValue::Signed32(::core::clone::Clone::clone(__self_0))
}
ArgValue::Unsigned32(__self_0) => {
ArgValue::Unsigned32(::core::clone::Clone::clone(__self_0))
}
ArgValue::Signed64(__self_0) => {
ArgValue::Signed64(::core::clone::Clone::clone(__self_0))
}
ArgValue::Unsigned64(__self_0) => {
ArgValue::Unsigned64(::core::clone::Clone::clone(__self_0))
}
ArgValue::Double(__self_0) => {
ArgValue::Double(::core::clone::Clone::clone(__self_0))
}
ArgValue::String(__self_0) => {
ArgValue::String(::core::clone::Clone::clone(__self_0))
}
ArgValue::Pointer(__self_0) => {
ArgValue::Pointer(::core::clone::Clone::clone(__self_0))
}
ArgValue::KernelObj(__self_0) => {
ArgValue::KernelObj(::core::clone::Clone::clone(__self_0))
}
}
}
}
#[automatically_derived]
impl ::core::fmt::Debug for ArgValue {
    /// Renders each variant as a one-field tuple (`Boolean(true)`, ...);
    /// `Null` is printed bare. Matches the derive's output exactly.
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            ArgValue::Null => f.write_str("Null"),
            ArgValue::Boolean(v) => f.debug_tuple("Boolean").field(v).finish(),
            ArgValue::Signed32(v) => f.debug_tuple("Signed32").field(v).finish(),
            ArgValue::Unsigned32(v) => {
                f.debug_tuple("Unsigned32").field(v).finish()
            }
            ArgValue::Signed64(v) => f.debug_tuple("Signed64").field(v).finish(),
            ArgValue::Unsigned64(v) => {
                f.debug_tuple("Unsigned64").field(v).finish()
            }
            ArgValue::Double(v) => f.debug_tuple("Double").field(v).finish(),
            ArgValue::String(v) => f.debug_tuple("String").field(v).finish(),
            ArgValue::Pointer(v) => f.debug_tuple("Pointer").field(v).finish(),
            ArgValue::KernelObj(v) => {
                f.debug_tuple("KernelObj").field(v).finish()
            }
        }
    }
}
// Compiler marker trait emitted by `derive(PartialEq)` for `ArgValue`.
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ArgValue {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ArgValue {
#[inline]
fn eq(&self, other: &ArgValue) -> bool {
let __self_tag = ::core::intrinsics::discriminant_value(self);
let __arg1_tag = ::core::intrinsics::discriminant_value(other);
__self_tag == __arg1_tag
&& match (self, other) {
(ArgValue::Boolean(__self_0), ArgValue::Boolean(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(ArgValue::Signed32(__self_0), ArgValue::Signed32(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(ArgValue::Unsigned32(__self_0), ArgValue::Unsigned32(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(ArgValue::Signed64(__self_0), ArgValue::Signed64(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(ArgValue::Unsigned64(__self_0), ArgValue::Unsigned64(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(ArgValue::Double(__self_0), ArgValue::Double(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(ArgValue::String(__self_0), ArgValue::String(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(ArgValue::Pointer(__self_0), ArgValue::Pointer(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(ArgValue::KernelObj(__self_0), ArgValue::KernelObj(__arg1_0)) => {
*__self_0 == *__arg1_0
}
_ => true,
}
}
}
impl ArgValue {
    /// True iff this value is the `Null` variant.
    pub fn is_null(&self) -> bool {
        matches!(self, Self::Null)
    }
    /// The boolean payload, if this is a `Boolean`.
    pub fn boolean(&self) -> Option<bool> {
        if let Self::Boolean(b) = self { Some(*b) } else { None }
    }
    /// The `i32` payload, if this is a `Signed32`.
    pub fn signed_32(&self) -> Option<i32> {
        if let Self::Signed32(n) = self { Some(*n) } else { None }
    }
    /// The `u32` payload, if this is an `Unsigned32`.
    pub fn unsigned_32(&self) -> Option<u32> {
        if let Self::Unsigned32(n) = self { Some(*n) } else { None }
    }
    /// The `i64` payload, if this is a `Signed64`.
    pub fn signed_64(&self) -> Option<i64> {
        if let Self::Signed64(n) = self { Some(*n) } else { None }
    }
    /// The `u64` payload, if this is an `Unsigned64`.
    pub fn unsigned_64(&self) -> Option<u64> {
        if let Self::Unsigned64(n) = self { Some(*n) } else { None }
    }
    /// The `f64` payload, if this is a `Double`.
    pub fn double(&self) -> Option<f64> {
        if let Self::Double(n) = self { Some(*n) } else { None }
    }
    /// A borrowed view of the string payload, if this is a `String`.
    pub fn string(&self) -> Option<&str> {
        if let Self::String(s) = self { Some(s.as_str()) } else { None }
    }
    /// The pointer payload, if this is a `Pointer`.
    pub fn pointer(&self) -> Option<u64> {
        if let Self::Pointer(p) = self { Some(*p) } else { None }
    }
    /// The kernel-object payload, if this is a `KernelObj`.
    pub fn kernel_obj(&self) -> Option<u64> {
        if let Self::KernelObj(k) = self { Some(*k) } else { None }
    }
    /// Converts a raw value, interning any string reference through `ctx`.
    /// Returns `None` for `Unknown`-typed raw values.
    fn resolve(ctx: &mut ResolveCtx, raw: RawArgValue<'_>) -> Option<Self> {
        let resolved = match raw {
            RawArgValue::Null => Self::Null,
            RawArgValue::Boolean(b) => Self::Boolean(b),
            RawArgValue::Signed32(s) => Self::Signed32(s),
            RawArgValue::Unsigned32(u) => Self::Unsigned32(u),
            RawArgValue::Signed64(s) => Self::Signed64(s),
            RawArgValue::Unsigned64(u) => Self::Unsigned64(u),
            RawArgValue::Double(f) => Self::Double(f),
            RawArgValue::String(s) => Self::String(ctx.resolve_str(s)),
            RawArgValue::Pointer(p) => Self::Pointer(p),
            RawArgValue::KernelObj(k) => Self::KernelObj(k),
            RawArgValue::Unknown { .. } => return None,
        };
        Some(resolved)
    }
}
/// An argument value as parsed from the stream; string data may borrow from
/// the input buffer. `Unknown` preserves the raw payload of unrecognized
/// argument types so records can be round-tripped by `RawArg::serialize`.
pub enum RawArgValue<'a> {
Null,
Boolean(bool),
Signed32(i32),
Unsigned32(u32),
Signed64(i64),
Unsigned64(u64),
Double(f64),
String(StringRef<'a>),
Pointer(u64),
KernelObj(u64),
Unknown { raw_type: u8, bytes: &'a [u8] },
}
#[automatically_derived]
impl<'a> ::core::clone::Clone for RawArgValue<'a> {
#[inline]
fn clone(&self) -> RawArgValue<'a> {
match self {
RawArgValue::Null => RawArgValue::Null,
RawArgValue::Boolean(__self_0) => {
RawArgValue::Boolean(::core::clone::Clone::clone(__self_0))
}
RawArgValue::Signed32(__self_0) => {
RawArgValue::Signed32(::core::clone::Clone::clone(__self_0))
}
RawArgValue::Unsigned32(__self_0) => {
RawArgValue::Unsigned32(::core::clone::Clone::clone(__self_0))
}
RawArgValue::Signed64(__self_0) => {
RawArgValue::Signed64(::core::clone::Clone::clone(__self_0))
}
RawArgValue::Unsigned64(__self_0) => {
RawArgValue::Unsigned64(::core::clone::Clone::clone(__self_0))
}
RawArgValue::Double(__self_0) => {
RawArgValue::Double(::core::clone::Clone::clone(__self_0))
}
RawArgValue::String(__self_0) => {
RawArgValue::String(::core::clone::Clone::clone(__self_0))
}
RawArgValue::Pointer(__self_0) => {
RawArgValue::Pointer(::core::clone::Clone::clone(__self_0))
}
RawArgValue::KernelObj(__self_0) => {
RawArgValue::KernelObj(::core::clone::Clone::clone(__self_0))
}
RawArgValue::Unknown { raw_type: __self_0, bytes: __self_1 } => {
RawArgValue::Unknown {
raw_type: ::core::clone::Clone::clone(__self_0),
bytes: ::core::clone::Clone::clone(__self_1),
}
}
}
}
}
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawArgValue<'a> {
    /// Tuple-style rendering per variant; `Unknown` is shown as a struct
    /// with its `raw_type` and `bytes` fields, matching the derive output.
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            RawArgValue::Null => f.write_str("Null"),
            RawArgValue::Boolean(v) => {
                f.debug_tuple("Boolean").field(v).finish()
            }
            RawArgValue::Signed32(v) => {
                f.debug_tuple("Signed32").field(v).finish()
            }
            RawArgValue::Unsigned32(v) => {
                f.debug_tuple("Unsigned32").field(v).finish()
            }
            RawArgValue::Signed64(v) => {
                f.debug_tuple("Signed64").field(v).finish()
            }
            RawArgValue::Unsigned64(v) => {
                f.debug_tuple("Unsigned64").field(v).finish()
            }
            RawArgValue::Double(v) => f.debug_tuple("Double").field(v).finish(),
            RawArgValue::String(v) => f.debug_tuple("String").field(v).finish(),
            RawArgValue::Pointer(v) => {
                f.debug_tuple("Pointer").field(v).finish()
            }
            RawArgValue::KernelObj(v) => {
                f.debug_tuple("KernelObj").field(v).finish()
            }
            RawArgValue::Unknown { raw_type, bytes } => f
                .debug_struct("Unknown")
                .field("raw_type", raw_type)
                .field("bytes", bytes)
                .finish(),
        }
    }
}
// Compiler marker trait emitted by `derive(PartialEq)` for `RawArgValue`.
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawArgValue<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawArgValue<'a> {
#[inline]
fn eq(&self, other: &RawArgValue<'a>) -> bool {
let __self_tag = ::core::intrinsics::discriminant_value(self);
let __arg1_tag = ::core::intrinsics::discriminant_value(other);
__self_tag == __arg1_tag
&& match (self, other) {
(RawArgValue::Boolean(__self_0), RawArgValue::Boolean(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(
RawArgValue::Signed32(__self_0),
RawArgValue::Signed32(__arg1_0),
) => *__self_0 == *__arg1_0,
(
RawArgValue::Unsigned32(__self_0),
RawArgValue::Unsigned32(__arg1_0),
) => *__self_0 == *__arg1_0,
(
RawArgValue::Signed64(__self_0),
RawArgValue::Signed64(__arg1_0),
) => *__self_0 == *__arg1_0,
(
RawArgValue::Unsigned64(__self_0),
RawArgValue::Unsigned64(__arg1_0),
) => *__self_0 == *__arg1_0,
(RawArgValue::Double(__self_0), RawArgValue::Double(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(RawArgValue::String(__self_0), RawArgValue::String(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(RawArgValue::Pointer(__self_0), RawArgValue::Pointer(__arg1_0)) => {
*__self_0 == *__arg1_0
}
(
RawArgValue::KernelObj(__self_0),
RawArgValue::KernelObj(__arg1_0),
) => *__self_0 == *__arg1_0,
(
RawArgValue::Unknown { raw_type: __self_0, bytes: __self_1 },
RawArgValue::Unknown { raw_type: __arg1_0, bytes: __arg1_1 },
) => *__self_0 == *__arg1_0 && *__self_1 == *__arg1_1,
_ => true,
}
}
}
// FXT argument type tags, stored in the low 4 bits of each argument header
// word (see `BaseArgHeader::raw_type`). Any other value parses as
// `RawArgValue::Unknown`.
pub(crate) const NULL_ARG_TYPE: u8 = 0;
pub(crate) const I32_ARG_TYPE: u8 = 1;
pub(crate) const U32_ARG_TYPE: u8 = 2;
pub(crate) const I64_ARG_TYPE: u8 = 3;
pub(crate) const U64_ARG_TYPE: u8 = 4;
pub(crate) const F64_ARG_TYPE: u8 = 5;
pub(crate) const STR_ARG_TYPE: u8 = 6;
pub(crate) const PTR_ARG_TYPE: u8 = 7;
pub(crate) const KOBJ_ARG_TYPE: u8 = 8;
pub(crate) const BOOL_ARG_TYPE: u8 = 9;
/// Type-agnostic view of a 64-bit argument header word. Layout (expanded
/// from a `bitfield!` macro): bits 3..0 = raw_type, 15..4 = size_words
/// (record size including this header word), 31..16 = name_ref.
pub struct BaseArgHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for BaseArgHeader {
#[inline]
fn clone(&self) -> BaseArgHeader {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for BaseArgHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for BaseArgHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for BaseArgHeader {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for BaseArgHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for BaseArgHeader {
#[inline]
fn eq(&self, other: &BaseArgHeader) -> bool {
self.0 == other.0
}
}
// Forward bit-range reads to the underlying u64 (bitfield plumbing).
impl<T> ::bitfield::BitRange<T> for BaseArgHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
// Forward bit-range writes to the underlying u64.
impl<T> ::bitfield::BitRangeMut<T> for BaseArgHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
// Debug shows the raw word plus each decoded field.
impl std::fmt::Debug for BaseArgHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("BaseArgHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("name_ref", &self.name_ref());
debug_struct.finish()
}
}
impl BaseArgHeader {
// Argument type tag, bits 3..0 (see the *_ARG_TYPE constants).
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
// Total record size in 8-byte words (header included), bits 15..4.
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
// Name string reference (index or inline-length encoding), bits 31..16.
pub fn name_ref(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(31, 16);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_name_ref(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(31, 16, ::bitfield::Into::<u16>::into(value));
}
// All-zero header; note no raw_type is set, unlike the typed headers.
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header
}
// Wraps raw bits; the no-op closure is a generated placeholder for a
// validity check. Unlike the typed headers, no type tag is verified.
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
// Splits off this record's payload: (size_words - 1) * 8 bytes, since
// size_words counts the header word already consumed.
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for BaseArgHeader {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
/// Header word for a `Null`-typed argument. Same field layout as
/// `BaseArgHeader`; `new` additionally verifies the type tag.
pub struct NullHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for NullHeader {
#[inline]
fn clone(&self) -> NullHeader {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for NullHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for NullHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for NullHeader {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for NullHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for NullHeader {
#[inline]
fn eq(&self, other: &NullHeader) -> bool {
self.0 == other.0
}
}
// Bitfield plumbing: forward reads/writes to the underlying u64.
impl<T> ::bitfield::BitRange<T> for NullHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for NullHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for NullHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("NullHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("name_ref", &self.name_ref());
debug_struct.finish()
}
}
impl NullHeader {
// Field accessors: bits 3..0 type, 15..4 size in words, 31..16 name ref.
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn name_ref(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(31, 16);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_name_ref(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(31, 16, ::bitfield::Into::<u16>::into(value));
}
// Zeroed header with the Null type tag pre-set.
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(NULL_ARG_TYPE);
header
}
// Wraps raw bits, rejecting any word whose type tag is not Null.
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != NULL_ARG_TYPE {
return Err(crate::ParseError::WrongType {
context: "NullHeader",
expected: NULL_ARG_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
// Payload is (size_words - 1) * 8 bytes; size_words counts the header.
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for NullHeader {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
/// Header word for a `Signed32` argument. Same base layout plus the i32
/// payload packed into the high bits (63..32) of the header word itself.
pub struct I32Header(u64);
#[automatically_derived]
impl ::core::clone::Clone for I32Header {
#[inline]
fn clone(&self) -> I32Header {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for I32Header {}
#[automatically_derived]
impl ::core::marker::StructuralEq for I32Header {}
#[automatically_derived]
impl ::core::cmp::Eq for I32Header {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for I32Header {}
#[automatically_derived]
impl ::core::cmp::PartialEq for I32Header {
#[inline]
fn eq(&self, other: &I32Header) -> bool {
self.0 == other.0
}
}
// Bitfield plumbing: forward reads/writes to the underlying u64.
impl<T> ::bitfield::BitRange<T> for I32Header
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for I32Header
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for I32Header {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("I32Header");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("value", &self.value());
debug_struct.field("name_ref", &self.name_ref());
debug_struct.finish()
}
}
impl I32Header {
// Bits 3..0: type tag; 15..4: size in words; 63..32: i32 value;
// 31..16: name ref.
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn value(&self) -> i32 {
use ::bitfield::BitRange;
let raw_value: i32 = self.bit_range(63, 32);
::bitfield::Into::into(raw_value)
}
pub fn name_ref(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(31, 16);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_value(&mut self, value: i32) {
use ::bitfield::BitRangeMut;
self.set_bit_range(63, 32, ::bitfield::Into::<i32>::into(value));
}
pub fn set_name_ref(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(31, 16, ::bitfield::Into::<u16>::into(value));
}
// Zeroed header with the I32 type tag pre-set.
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(I32_ARG_TYPE);
header
}
// Wraps raw bits, rejecting any word whose type tag is not I32.
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != I32_ARG_TYPE {
return Err(crate::ParseError::WrongType {
context: "I32Header",
expected: I32_ARG_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
// Payload is (size_words - 1) * 8 bytes; size_words counts the header.
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for I32Header {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
/// Header word for an `Unsigned32` argument; u32 payload packed into
/// bits 63..32 of the header word.
pub struct U32Header(u64);
#[automatically_derived]
impl ::core::clone::Clone for U32Header {
#[inline]
fn clone(&self) -> U32Header {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for U32Header {}
#[automatically_derived]
impl ::core::marker::StructuralEq for U32Header {}
#[automatically_derived]
impl ::core::cmp::Eq for U32Header {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for U32Header {}
#[automatically_derived]
impl ::core::cmp::PartialEq for U32Header {
#[inline]
fn eq(&self, other: &U32Header) -> bool {
self.0 == other.0
}
}
// Bitfield plumbing: forward reads/writes to the underlying u64.
impl<T> ::bitfield::BitRange<T> for U32Header
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for U32Header
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for U32Header {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("U32Header");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("value", &self.value());
debug_struct.field("name_ref", &self.name_ref());
debug_struct.finish()
}
}
impl U32Header {
// Bits 3..0: type tag; 15..4: size in words; 63..32: u32 value;
// 31..16: name ref.
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn value(&self) -> u32 {
use ::bitfield::BitRange;
let raw_value: u32 = self.bit_range(63, 32);
::bitfield::Into::into(raw_value)
}
pub fn name_ref(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(31, 16);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_value(&mut self, value: u32) {
use ::bitfield::BitRangeMut;
self.set_bit_range(63, 32, ::bitfield::Into::<u32>::into(value));
}
pub fn set_name_ref(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(31, 16, ::bitfield::Into::<u16>::into(value));
}
// Zeroed header with the U32 type tag pre-set.
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(U32_ARG_TYPE);
header
}
// Wraps raw bits, rejecting any word whose type tag is not U32.
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != U32_ARG_TYPE {
return Err(crate::ParseError::WrongType {
context: "U32Header",
expected: U32_ARG_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
// Payload is (size_words - 1) * 8 bytes; size_words counts the header.
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for U32Header {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
/// Header word for an `i64` argument record.
///
/// No value accessor is generated here; presumably the 8-byte value follows
/// the header in the record payload — confirm against the argument parser.
pub struct I64Header(u64);
impl ::core::clone::Clone for I64Header {
    #[inline]
    fn clone(&self) -> I64Header {
        // Plain bitwise copy; the wrapped word is `Copy`.
        *self
    }
}
impl ::core::marker::Copy for I64Header {}
impl ::core::marker::StructuralEq for I64Header {}
impl ::core::cmp::Eq for I64Header {}
impl ::core::marker::StructuralPartialEq for I64Header {}
impl ::core::cmp::PartialEq for I64Header {
    #[inline]
    fn eq(&self, other: &I64Header) -> bool {
        self.0 == other.0
    }
}
/// Forward bit-range reads to the inner `u64` word.
impl<T> ::bitfield::BitRange<T> for I64Header
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
/// Forward bit-range writes to the inner `u64` word.
impl<T> ::bitfield::BitRangeMut<T> for I64Header
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for I64Header {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Show the raw word alongside each decoded field.
        f.debug_struct("I64Header")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("name_ref", &self.name_ref())
            .finish()
    }
}
impl I64Header {
    /// 4-bit argument type tag (bits 0..=3); `I64_ARG_TYPE` when valid.
    pub fn raw_type(&self) -> u8 {
        let bits: u8 = ::bitfield::BitRange::bit_range(self, 3, 0);
        ::bitfield::Into::into(bits)
    }
    /// Record length in 8-byte words, header word included (bits 4..=15).
    pub fn size_words(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 15, 4);
        ::bitfield::Into::into(bits)
    }
    /// String ref for the argument name (bits 16..=31).
    pub fn name_ref(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 31, 16);
        ::bitfield::Into::into(bits)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        ::bitfield::BitRangeMut::set_bit_range(self, 3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 31, 16, ::bitfield::Into::<u16>::into(value));
    }
    /// An all-zero header with only the type tag filled in.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(I64_ARG_TYPE);
        header
    }
    /// Wrap `bits`, rejecting words whose type tag is not `I64_ARG_TYPE`.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != I64_ARG_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "I64Header",
                expected: I64_ARG_TYPE,
                observed: header.raw_type(),
            });
        }
        // The macro's extra-validation hook expands to a no-op for this header.
        Ok(header)
    }
    /// Read one little-endian word from `buf` and validate it as an `I64Header`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(nom::number::streaming::le_u64, Self::new)(buf)
    }
    /// Split off this record's payload (the bytes after the header word).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        let words = self.size_words() as usize;
        // A valid record is at least one word long (the header itself).
        if words == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let payload_len = (words - 1) * 8;
        if buf.len() < payload_len {
            return Err(nom::Err::Incomplete(nom::Needed::Size(payload_len - buf.len())));
        }
        let (payload, rest) = buf.split_at(payload_len);
        Ok((rest, payload))
    }
}
impl crate::header::TraceHeader for I64Header {
    fn set_size_words(&mut self, n: u16) {
        // Resolves to the inherent setter (inherent methods shadow trait methods).
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Header word for a `u64` argument record.
///
/// No value accessor is generated here; presumably the 8-byte value follows
/// the header in the record payload — confirm against the argument parser.
pub struct U64Header(u64);
impl ::core::clone::Clone for U64Header {
    #[inline]
    fn clone(&self) -> U64Header {
        // Plain bitwise copy; the wrapped word is `Copy`.
        *self
    }
}
impl ::core::marker::Copy for U64Header {}
impl ::core::marker::StructuralEq for U64Header {}
impl ::core::cmp::Eq for U64Header {}
impl ::core::marker::StructuralPartialEq for U64Header {}
impl ::core::cmp::PartialEq for U64Header {
    #[inline]
    fn eq(&self, other: &U64Header) -> bool {
        self.0 == other.0
    }
}
/// Forward bit-range reads to the inner `u64` word.
impl<T> ::bitfield::BitRange<T> for U64Header
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
/// Forward bit-range writes to the inner `u64` word.
impl<T> ::bitfield::BitRangeMut<T> for U64Header
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for U64Header {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Show the raw word alongside each decoded field.
        f.debug_struct("U64Header")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("name_ref", &self.name_ref())
            .finish()
    }
}
impl U64Header {
    /// 4-bit argument type tag (bits 0..=3); `U64_ARG_TYPE` when valid.
    pub fn raw_type(&self) -> u8 {
        let bits: u8 = ::bitfield::BitRange::bit_range(self, 3, 0);
        ::bitfield::Into::into(bits)
    }
    /// Record length in 8-byte words, header word included (bits 4..=15).
    pub fn size_words(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 15, 4);
        ::bitfield::Into::into(bits)
    }
    /// String ref for the argument name (bits 16..=31).
    pub fn name_ref(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 31, 16);
        ::bitfield::Into::into(bits)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        ::bitfield::BitRangeMut::set_bit_range(self, 3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 31, 16, ::bitfield::Into::<u16>::into(value));
    }
    /// An all-zero header with only the type tag filled in.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(U64_ARG_TYPE);
        header
    }
    /// Wrap `bits`, rejecting words whose type tag is not `U64_ARG_TYPE`.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != U64_ARG_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "U64Header",
                expected: U64_ARG_TYPE,
                observed: header.raw_type(),
            });
        }
        // The macro's extra-validation hook expands to a no-op for this header.
        Ok(header)
    }
    /// Read one little-endian word from `buf` and validate it as a `U64Header`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(nom::number::streaming::le_u64, Self::new)(buf)
    }
    /// Split off this record's payload (the bytes after the header word).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        let words = self.size_words() as usize;
        // A valid record is at least one word long (the header itself).
        if words == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let payload_len = (words - 1) * 8;
        if buf.len() < payload_len {
            return Err(nom::Err::Incomplete(nom::Needed::Size(payload_len - buf.len())));
        }
        let (payload, rest) = buf.split_at(payload_len);
        Ok((rest, payload))
    }
}
impl crate::header::TraceHeader for U64Header {
    fn set_size_words(&mut self, n: u16) {
        // Resolves to the inherent setter (inherent methods shadow trait methods).
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Header word for an `f64` argument record.
///
/// No value accessor is generated here; presumably the 8-byte value follows
/// the header in the record payload — confirm against the argument parser.
pub struct F64Header(u64);
impl ::core::clone::Clone for F64Header {
    #[inline]
    fn clone(&self) -> F64Header {
        // Plain bitwise copy; the wrapped word is `Copy`.
        *self
    }
}
impl ::core::marker::Copy for F64Header {}
impl ::core::marker::StructuralEq for F64Header {}
impl ::core::cmp::Eq for F64Header {}
impl ::core::marker::StructuralPartialEq for F64Header {}
impl ::core::cmp::PartialEq for F64Header {
    #[inline]
    fn eq(&self, other: &F64Header) -> bool {
        self.0 == other.0
    }
}
/// Forward bit-range reads to the inner `u64` word.
impl<T> ::bitfield::BitRange<T> for F64Header
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
/// Forward bit-range writes to the inner `u64` word.
impl<T> ::bitfield::BitRangeMut<T> for F64Header
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for F64Header {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Show the raw word alongside each decoded field.
        f.debug_struct("F64Header")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("name_ref", &self.name_ref())
            .finish()
    }
}
impl F64Header {
    /// 4-bit argument type tag (bits 0..=3); `F64_ARG_TYPE` when valid.
    pub fn raw_type(&self) -> u8 {
        let bits: u8 = ::bitfield::BitRange::bit_range(self, 3, 0);
        ::bitfield::Into::into(bits)
    }
    /// Record length in 8-byte words, header word included (bits 4..=15).
    pub fn size_words(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 15, 4);
        ::bitfield::Into::into(bits)
    }
    /// String ref for the argument name (bits 16..=31).
    pub fn name_ref(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 31, 16);
        ::bitfield::Into::into(bits)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        ::bitfield::BitRangeMut::set_bit_range(self, 3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 31, 16, ::bitfield::Into::<u16>::into(value));
    }
    /// An all-zero header with only the type tag filled in.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(F64_ARG_TYPE);
        header
    }
    /// Wrap `bits`, rejecting words whose type tag is not `F64_ARG_TYPE`.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != F64_ARG_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "F64Header",
                expected: F64_ARG_TYPE,
                observed: header.raw_type(),
            });
        }
        // The macro's extra-validation hook expands to a no-op for this header.
        Ok(header)
    }
    /// Read one little-endian word from `buf` and validate it as an `F64Header`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(nom::number::streaming::le_u64, Self::new)(buf)
    }
    /// Split off this record's payload (the bytes after the header word).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        let words = self.size_words() as usize;
        // A valid record is at least one word long (the header itself).
        if words == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let payload_len = (words - 1) * 8;
        if buf.len() < payload_len {
            return Err(nom::Err::Incomplete(nom::Needed::Size(payload_len - buf.len())));
        }
        let (payload, rest) = buf.split_at(payload_len);
        Ok((rest, payload))
    }
}
impl crate::header::TraceHeader for F64Header {
    fn set_size_words(&mut self, n: u16) {
        // Resolves to the inherent setter (inherent methods shadow trait methods).
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Header word for a string argument record.
///
/// Bit layout (LSB first): type tag 0..=3, size-in-words 4..=15,
/// name ref 16..=31, value ref 32..=47.
pub struct StringHeader(u64);
impl ::core::clone::Clone for StringHeader {
    #[inline]
    fn clone(&self) -> StringHeader {
        // Plain bitwise copy; the wrapped word is `Copy`.
        *self
    }
}
impl ::core::marker::Copy for StringHeader {}
impl ::core::marker::StructuralEq for StringHeader {}
impl ::core::cmp::Eq for StringHeader {}
impl ::core::marker::StructuralPartialEq for StringHeader {}
impl ::core::cmp::PartialEq for StringHeader {
    #[inline]
    fn eq(&self, other: &StringHeader) -> bool {
        self.0 == other.0
    }
}
/// Forward bit-range reads to the inner `u64` word.
impl<T> ::bitfield::BitRange<T> for StringHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
/// Forward bit-range writes to the inner `u64` word.
impl<T> ::bitfield::BitRangeMut<T> for StringHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for StringHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Show the raw word alongside each decoded field.
        f.debug_struct("StringHeader")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("value_ref", &self.value_ref())
            .field("name_ref", &self.name_ref())
            .finish()
    }
}
impl StringHeader {
    /// 4-bit argument type tag (bits 0..=3); `STR_ARG_TYPE` when valid.
    pub fn raw_type(&self) -> u8 {
        let bits: u8 = ::bitfield::BitRange::bit_range(self, 3, 0);
        ::bitfield::Into::into(bits)
    }
    /// Record length in 8-byte words, header word included (bits 4..=15).
    pub fn size_words(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 15, 4);
        ::bitfield::Into::into(bits)
    }
    /// String ref for the argument value (bits 32..=47).
    pub fn value_ref(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 47, 32);
        ::bitfield::Into::into(bits)
    }
    /// String ref for the argument name (bits 16..=31).
    pub fn name_ref(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 31, 16);
        ::bitfield::Into::into(bits)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        ::bitfield::BitRangeMut::set_bit_range(self, 3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_value_ref(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 47, 32, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 31, 16, ::bitfield::Into::<u16>::into(value));
    }
    /// An all-zero header with only the type tag filled in.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(STR_ARG_TYPE);
        header
    }
    /// Wrap `bits`, rejecting words whose type tag is not `STR_ARG_TYPE`.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != STR_ARG_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "StringHeader",
                expected: STR_ARG_TYPE,
                observed: header.raw_type(),
            });
        }
        // The macro's extra-validation hook expands to a no-op for this header.
        Ok(header)
    }
    /// Read one little-endian word from `buf` and validate it as a `StringHeader`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(nom::number::streaming::le_u64, Self::new)(buf)
    }
    /// Split off this record's payload (the bytes after the header word).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        let words = self.size_words() as usize;
        // A valid record is at least one word long (the header itself).
        if words == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let payload_len = (words - 1) * 8;
        if buf.len() < payload_len {
            return Err(nom::Err::Incomplete(nom::Needed::Size(payload_len - buf.len())));
        }
        let (payload, rest) = buf.split_at(payload_len);
        Ok((rest, payload))
    }
}
impl crate::header::TraceHeader for StringHeader {
    fn set_size_words(&mut self, n: u16) {
        // Resolves to the inherent setter (inherent methods shadow trait methods).
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Header word for a pointer argument record.
///
/// No value accessor is generated here; presumably the 8-byte pointer value
/// follows the header in the record payload — confirm against the parser.
pub struct PtrHeader(u64);
impl ::core::clone::Clone for PtrHeader {
    #[inline]
    fn clone(&self) -> PtrHeader {
        // Plain bitwise copy; the wrapped word is `Copy`.
        *self
    }
}
impl ::core::marker::Copy for PtrHeader {}
impl ::core::marker::StructuralEq for PtrHeader {}
impl ::core::cmp::Eq for PtrHeader {}
impl ::core::marker::StructuralPartialEq for PtrHeader {}
impl ::core::cmp::PartialEq for PtrHeader {
    #[inline]
    fn eq(&self, other: &PtrHeader) -> bool {
        self.0 == other.0
    }
}
/// Forward bit-range reads to the inner `u64` word.
impl<T> ::bitfield::BitRange<T> for PtrHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
/// Forward bit-range writes to the inner `u64` word.
impl<T> ::bitfield::BitRangeMut<T> for PtrHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for PtrHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Show the raw word alongside each decoded field.
        f.debug_struct("PtrHeader")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("name_ref", &self.name_ref())
            .finish()
    }
}
impl PtrHeader {
    /// 4-bit argument type tag (bits 0..=3); `PTR_ARG_TYPE` when valid.
    pub fn raw_type(&self) -> u8 {
        let bits: u8 = ::bitfield::BitRange::bit_range(self, 3, 0);
        ::bitfield::Into::into(bits)
    }
    /// Record length in 8-byte words, header word included (bits 4..=15).
    pub fn size_words(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 15, 4);
        ::bitfield::Into::into(bits)
    }
    /// String ref for the argument name (bits 16..=31).
    pub fn name_ref(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 31, 16);
        ::bitfield::Into::into(bits)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        ::bitfield::BitRangeMut::set_bit_range(self, 3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 31, 16, ::bitfield::Into::<u16>::into(value));
    }
    /// An all-zero header with only the type tag filled in.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(PTR_ARG_TYPE);
        header
    }
    /// Wrap `bits`, rejecting words whose type tag is not `PTR_ARG_TYPE`.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != PTR_ARG_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "PtrHeader",
                expected: PTR_ARG_TYPE,
                observed: header.raw_type(),
            });
        }
        // The macro's extra-validation hook expands to a no-op for this header.
        Ok(header)
    }
    /// Read one little-endian word from `buf` and validate it as a `PtrHeader`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(nom::number::streaming::le_u64, Self::new)(buf)
    }
    /// Split off this record's payload (the bytes after the header word).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        let words = self.size_words() as usize;
        // A valid record is at least one word long (the header itself).
        if words == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let payload_len = (words - 1) * 8;
        if buf.len() < payload_len {
            return Err(nom::Err::Incomplete(nom::Needed::Size(payload_len - buf.len())));
        }
        let (payload, rest) = buf.split_at(payload_len);
        Ok((rest, payload))
    }
}
impl crate::header::TraceHeader for PtrHeader {
    fn set_size_words(&mut self, n: u16) {
        // Resolves to the inherent setter (inherent methods shadow trait methods).
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Header word for a kernel-object-id argument record.
///
/// No value accessor is generated here; presumably the 8-byte koid follows
/// the header in the record payload — confirm against the argument parser.
pub struct KobjHeader(u64);
impl ::core::clone::Clone for KobjHeader {
    #[inline]
    fn clone(&self) -> KobjHeader {
        // Plain bitwise copy; the wrapped word is `Copy`.
        *self
    }
}
impl ::core::marker::Copy for KobjHeader {}
impl ::core::marker::StructuralEq for KobjHeader {}
impl ::core::cmp::Eq for KobjHeader {}
impl ::core::marker::StructuralPartialEq for KobjHeader {}
impl ::core::cmp::PartialEq for KobjHeader {
    #[inline]
    fn eq(&self, other: &KobjHeader) -> bool {
        self.0 == other.0
    }
}
/// Forward bit-range reads to the inner `u64` word.
impl<T> ::bitfield::BitRange<T> for KobjHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
/// Forward bit-range writes to the inner `u64` word.
impl<T> ::bitfield::BitRangeMut<T> for KobjHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for KobjHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Show the raw word alongside each decoded field.
        f.debug_struct("KobjHeader")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("name_ref", &self.name_ref())
            .finish()
    }
}
impl KobjHeader {
    /// 4-bit argument type tag (bits 0..=3); `KOBJ_ARG_TYPE` when valid.
    pub fn raw_type(&self) -> u8 {
        let bits: u8 = ::bitfield::BitRange::bit_range(self, 3, 0);
        ::bitfield::Into::into(bits)
    }
    /// Record length in 8-byte words, header word included (bits 4..=15).
    pub fn size_words(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 15, 4);
        ::bitfield::Into::into(bits)
    }
    /// String ref for the argument name (bits 16..=31).
    pub fn name_ref(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 31, 16);
        ::bitfield::Into::into(bits)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        ::bitfield::BitRangeMut::set_bit_range(self, 3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 31, 16, ::bitfield::Into::<u16>::into(value));
    }
    /// An all-zero header with only the type tag filled in.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(KOBJ_ARG_TYPE);
        header
    }
    /// Wrap `bits`, rejecting words whose type tag is not `KOBJ_ARG_TYPE`.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != KOBJ_ARG_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "KobjHeader",
                expected: KOBJ_ARG_TYPE,
                observed: header.raw_type(),
            });
        }
        // The macro's extra-validation hook expands to a no-op for this header.
        Ok(header)
    }
    /// Read one little-endian word from `buf` and validate it as a `KobjHeader`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(nom::number::streaming::le_u64, Self::new)(buf)
    }
    /// Split off this record's payload (the bytes after the header word).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        let words = self.size_words() as usize;
        // A valid record is at least one word long (the header itself).
        if words == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let payload_len = (words - 1) * 8;
        if buf.len() < payload_len {
            return Err(nom::Err::Incomplete(nom::Needed::Size(payload_len - buf.len())));
        }
        let (payload, rest) = buf.split_at(payload_len);
        Ok((rest, payload))
    }
}
impl crate::header::TraceHeader for KobjHeader {
    fn set_size_words(&mut self, n: u16) {
        // Resolves to the inherent setter (inherent methods shadow trait methods).
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Header word for a boolean argument record.
///
/// Bit layout (LSB first): type tag 0..=3, size-in-words 4..=15,
/// name ref 16..=31, and the boolean value stored in the single bit 32.
pub struct BoolHeader(u64);
impl ::core::clone::Clone for BoolHeader {
    #[inline]
    fn clone(&self) -> BoolHeader {
        // Plain bitwise copy; the wrapped word is `Copy`.
        *self
    }
}
impl ::core::marker::Copy for BoolHeader {}
impl ::core::marker::StructuralEq for BoolHeader {}
impl ::core::cmp::Eq for BoolHeader {}
impl ::core::marker::StructuralPartialEq for BoolHeader {}
impl ::core::cmp::PartialEq for BoolHeader {
    #[inline]
    fn eq(&self, other: &BoolHeader) -> bool {
        self.0 == other.0
    }
}
/// Forward bit-range reads to the inner `u64` word.
impl<T> ::bitfield::BitRange<T> for BoolHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
/// Forward bit-range writes to the inner `u64` word.
impl<T> ::bitfield::BitRangeMut<T> for BoolHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for BoolHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Show the raw word alongside each decoded field.
        f.debug_struct("BoolHeader")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("value", &self.value())
            .field("name_ref", &self.name_ref())
            .finish()
    }
}
impl BoolHeader {
    /// 4-bit argument type tag (bits 0..=3); `BOOL_ARG_TYPE` when valid.
    pub fn raw_type(&self) -> u8 {
        let bits: u8 = ::bitfield::BitRange::bit_range(self, 3, 0);
        ::bitfield::Into::into(bits)
    }
    /// Record length in 8-byte words, header word included (bits 4..=15).
    pub fn size_words(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 15, 4);
        ::bitfield::Into::into(bits)
    }
    /// The boolean value as 0/1, read from the single bit 32.
    pub fn value(&self) -> u8 {
        let bits: u8 = ::bitfield::BitRange::bit_range(self, 32, 32);
        ::bitfield::Into::into(bits)
    }
    /// String ref for the argument name (bits 16..=31).
    pub fn name_ref(&self) -> u16 {
        let bits: u16 = ::bitfield::BitRange::bit_range(self, 31, 16);
        ::bitfield::Into::into(bits)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        ::bitfield::BitRangeMut::set_bit_range(self, 3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 15, 4, ::bitfield::Into::<u16>::into(value));
    }
    /// Write the 0/1 boolean value into bit 32.
    pub fn set_value(&mut self, value: u8) {
        ::bitfield::BitRangeMut::set_bit_range(self, 32, 32, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        ::bitfield::BitRangeMut::set_bit_range(self, 31, 16, ::bitfield::Into::<u16>::into(value));
    }
    /// An all-zero header with only the type tag filled in.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(BOOL_ARG_TYPE);
        header
    }
    /// Wrap `bits`, rejecting words whose type tag is not `BOOL_ARG_TYPE`.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != BOOL_ARG_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "BoolHeader",
                expected: BOOL_ARG_TYPE,
                observed: header.raw_type(),
            });
        }
        // The macro's extra-validation hook expands to a no-op for this header.
        Ok(header)
    }
    /// Read one little-endian word from `buf` and validate it as a `BoolHeader`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(nom::number::streaming::le_u64, Self::new)(buf)
    }
    /// Split off this record's payload (the bytes after the header word).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        let words = self.size_words() as usize;
        // A valid record is at least one word long (the header itself).
        if words == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let payload_len = (words - 1) * 8;
        if buf.len() < payload_len {
            return Err(nom::Err::Incomplete(nom::Needed::Size(payload_len - buf.len())));
        }
        let (payload, rest) = buf.split_at(payload_len);
        Ok((rest, payload))
    }
}
impl crate::header::TraceHeader for BoolHeader {
    fn set_size_words(&mut self, n: u16) {
        // Resolves to the inherent setter (inherent methods shadow trait methods).
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
}
mod blob {
use crate::{
args::{Arg, RawArg},
error::ParseWarning, init::Ticks, session::ResolveCtx, string::StringRef,
take_n_padded, thread::{ProcessKoid, ProcessRef, ThreadKoid, ThreadRef},
trace_header, ParseResult, Provider, BLOB_RECORD_TYPE, LARGE_RECORD_TYPE,
};
use flyweights::FlyStr;
use nom::{combinator::all_consuming, number::complete::le_u64};
// Discriminants carried in BlobHeader::blob_format_type; mapped to variants
// by `BlobType::from` (unrecognized bytes become `BlobType::Unknown`).
const BLOB_TYPE_DATA: u8 = 0x01;
const BLOB_TYPE_LAST_BRANCH: u8 = 0x02;
const BLOB_TYPE_PERFETTO: u8 = 0x03;
// Discriminants for the two large-record blob layouts (with/without metadata).
const LARGE_BLOB_WITH_METADATA_TYPE: u8 = 0;
const LARGE_BLOB_NO_METADATA_TYPE: u8 = 1;
pub struct BlobRecord {
pub provider: Option<Provider>,
pub name: FlyStr,
pub ty: BlobType,
pub bytes: Vec<u8>,
}
#[automatically_derived]
impl ::core::clone::Clone for BlobRecord {
#[inline]
fn clone(&self) -> BlobRecord {
BlobRecord {
provider: ::core::clone::Clone::clone(&self.provider),
name: ::core::clone::Clone::clone(&self.name),
ty: ::core::clone::Clone::clone(&self.ty),
bytes: ::core::clone::Clone::clone(&self.bytes),
}
}
}
#[automatically_derived]
impl ::core::fmt::Debug for BlobRecord {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_struct_field4_finish(
f,
"BlobRecord",
"provider",
&self.provider,
"name",
&self.name,
"ty",
&self.ty,
"bytes",
&&self.bytes,
)
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for BlobRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for BlobRecord {
#[inline]
fn eq(&self, other: &BlobRecord) -> bool {
self.provider == other.provider && self.name == other.name
&& self.ty == other.ty && self.bytes == other.bytes
}
}
impl BlobRecord {
pub(super) fn resolve(ctx: &mut ResolveCtx, raw: RawBlobRecord<'_>) -> Self {
Self {
provider: ctx.current_provider(),
name: ctx.resolve_str(raw.name),
ty: raw.ty,
bytes: raw.bytes.to_owned(),
}
}
}
/// A blob record as parsed straight out of the trace buffer, borrowing the
/// name ref and payload bytes from the input slice.
pub(super) struct RawBlobRecord<'a> {
    name: StringRef<'a>,
    ty: BlobType,
    bytes: &'a [u8],
}
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawBlobRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(
            f,
            "RawBlobRecord",
            "name",
            &self.name,
            "ty",
            &self.ty,
            "bytes",
            &&self.bytes,
        )
    }
}
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawBlobRecord<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawBlobRecord<'a> {
    #[inline]
    fn eq(&self, other: &RawBlobRecord<'a>) -> bool {
        self.name == other.name && self.ty == other.ty && self.bytes == other.bytes
    }
}
impl<'a> RawBlobRecord<'a> {
    /// Parse one blob record starting at `buf`: read the 8-byte header, slice
    /// off the record payload, then pull the (possibly inline) name and the
    /// blob bytes out of that payload.
    pub(super) fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        let (buf, header) = BlobHeader::parse(buf)?;
        let ty = BlobType::from(header.blob_format_type());
        let (rem, payload) = header.take_payload(buf)?;
        let (payload, name) = StringRef::parse(header.name_ref(), payload)?;
        // payload_len is the unpadded blob length; take_n_padded also consumes
        // alignment padding. Any leftover bytes are deliberately ignored.
        let (_should_be_empty, bytes) = take_n_padded(
            header.payload_len() as usize,
            payload,
        )?;
        Ok((rem, Self { name, ty, bytes }))
    }
}
pub enum BlobType {
Data,
LastBranch,
Perfetto,
Unknown { raw: u8 },
}
#[automatically_derived]
impl ::core::clone::Clone for BlobType {
#[inline]
fn clone(&self) -> BlobType {
match self {
BlobType::Data => BlobType::Data,
BlobType::LastBranch => BlobType::LastBranch,
BlobType::Perfetto => BlobType::Perfetto,
BlobType::Unknown { raw: __self_0 } => {
BlobType::Unknown {
raw: ::core::clone::Clone::clone(__self_0),
}
}
}
}
}
#[automatically_derived]
impl ::core::fmt::Debug for BlobType {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
BlobType::Data => ::core::fmt::Formatter::write_str(f, "Data"),
BlobType::LastBranch => {
::core::fmt::Formatter::write_str(f, "LastBranch")
}
BlobType::Perfetto => ::core::fmt::Formatter::write_str(f, "Perfetto"),
BlobType::Unknown { raw: __self_0 } => {
::core::fmt::Formatter::debug_struct_field1_finish(
f,
"Unknown",
"raw",
&__self_0,
)
}
}
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for BlobType {}
#[automatically_derived]
impl ::core::cmp::PartialEq for BlobType {
#[inline]
fn eq(&self, other: &BlobType) -> bool {
let __self_tag = ::core::intrinsics::discriminant_value(self);
let __arg1_tag = ::core::intrinsics::discriminant_value(other);
__self_tag == __arg1_tag
&& match (self, other) {
(
BlobType::Unknown { raw: __self_0 },
BlobType::Unknown { raw: __arg1_0 },
) => *__self_0 == *__arg1_0,
_ => true,
}
}
}
impl From<u8> for BlobType {
fn from(raw: u8) -> Self {
match raw {
BLOB_TYPE_DATA => BlobType::Data,
BLOB_TYPE_LAST_BRANCH => BlobType::LastBranch,
BLOB_TYPE_PERFETTO => BlobType::Perfetto,
raw => BlobType::Unknown { raw },
}
}
}
/// Bitfield wrapper over the 64-bit blob record header word.
pub struct BlobHeader(u64);
// Expanded `#[derive(Clone)]`; `BlobHeader` is `Copy`, so clone is a copy.
#[automatically_derived]
impl ::core::clone::Clone for BlobHeader {
    #[inline]
    fn clone(&self) -> BlobHeader {
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for BlobHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for BlobHeader {}
// Expanded `#[derive(Eq)]` output.
#[automatically_derived]
impl ::core::cmp::Eq for BlobHeader {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for BlobHeader {}
// Expanded `#[derive(PartialEq)]` output.
#[automatically_derived]
impl ::core::cmp::PartialEq for BlobHeader {
    #[inline]
    fn eq(&self, other: &BlobHeader) -> bool {
        self.0 == other.0
    }
}
// `bitfield` macro plumbing: delegate bit-range reads/writes to the inner u64.
impl<T> ::bitfield::BitRange<T> for BlobHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
impl<T> ::bitfield::BitRangeMut<T> for BlobHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
// Macro-generated Debug: raw word plus each decoded bitfield accessor.
impl std::fmt::Debug for BlobHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        let mut debug_struct = f.debug_struct("BlobHeader");
        debug_struct.field(".0", &self.0);
        debug_struct.field("raw_type", &self.raw_type());
        debug_struct.field("size_words", &self.size_words());
        debug_struct.field("name_ref", &self.name_ref());
        debug_struct.field("payload_len", &self.payload_len());
        debug_struct.field("blob_format_type", &self.blob_format_type());
        debug_struct.finish()
    }
}
impl BlobHeader {
    /// Record type ordinal, bits 3..=0 of the header word.
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    /// Total record size in 8-byte words (header word included), bits 15..=4.
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    /// Name string reference (inline length or string-table index), bits 31..=16.
    pub fn name_ref(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(31, 16);
        ::bitfield::Into::into(raw_value)
    }
    /// Blob payload length in bytes.
    // NOTE(review): this reads only bits 36..=32 (5 bits) — verify against the
    // trace-format spec that the payload-length field is really this narrow.
    pub fn payload_len(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(36, 32);
        ::bitfield::Into::into(raw_value)
    }
    /// Blob format type (see `BlobType`), bits 55..=48.
    pub fn blob_format_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(55, 48);
        ::bitfield::Into::into(raw_value)
    }
    // Setters mirror the getters above, writing the same bit ranges.
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(31, 16, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_payload_len(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(36, 32, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_blob_format_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(55, 48, ::bitfield::Into::<u8>::into(value));
    }
    /// An all-zero header with only the record-type field populated.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(BLOB_RECORD_TYPE);
        header
    }
    /// Validates that `bits` carries the blob record-type ordinal.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != BLOB_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "BlobHeader",
                expected: BLOB_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        // Extra-validation hook generated by the trace_header! macro; a no-op here.
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    /// Reads one little-endian u64 from `buf` and validates it as a header.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    /// Splits `buf` into (remainder, payload) where the payload holds this
    /// record's size_words-1 words (the header word was already consumed).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        // size_words counts the header itself, so zero is malformed.
        if self.size_words() == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
// trace_header! macro glue: the generic header interface used when writing records.
impl crate::header::TraceHeader for BlobHeader {
    fn set_size_words(&mut self, n: u16) {
        // u16 -> u16 conversion; this try_into cannot fail.
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// A fully-resolved large blob record: owned payload bytes plus optional
/// per-record metadata (timestamp/process/thread/args).
pub struct LargeBlobRecord {
    pub provider: Option<Provider>,
    pub ty: BlobType,
    pub category: FlyStr,
    pub name: FlyStr,
    pub bytes: Vec<u8>,
    pub metadata: Option<LargeBlobMetadata>,
}
// Expanded `#[derive(Clone)]` output.
#[automatically_derived]
impl ::core::clone::Clone for LargeBlobRecord {
    #[inline]
    fn clone(&self) -> LargeBlobRecord {
        LargeBlobRecord {
            provider: ::core::clone::Clone::clone(&self.provider),
            ty: ::core::clone::Clone::clone(&self.ty),
            category: ::core::clone::Clone::clone(&self.category),
            name: ::core::clone::Clone::clone(&self.name),
            bytes: ::core::clone::Clone::clone(&self.bytes),
            metadata: ::core::clone::Clone::clone(&self.metadata),
        }
    }
}
// Expanded `#[derive(Debug)]` output (field-array form used for >5 fields).
#[automatically_derived]
impl ::core::fmt::Debug for LargeBlobRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ = &[
            "provider",
            "ty",
            "category",
            "name",
            "bytes",
            "metadata",
        ];
        let values: &[&dyn ::core::fmt::Debug] = &[
            &self.provider,
            &self.ty,
            &self.category,
            &self.name,
            &self.bytes,
            &&self.metadata,
        ];
        ::core::fmt::Formatter::debug_struct_fields_finish(
            f,
            "LargeBlobRecord",
            names,
            values,
        )
    }
}
// Expanded `#[derive(PartialEq)]` output.
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for LargeBlobRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for LargeBlobRecord {
    #[inline]
    fn eq(&self, other: &LargeBlobRecord) -> bool {
        self.provider == other.provider && self.ty == other.ty
            && self.category == other.category && self.name == other.name
            && self.bytes == other.bytes && self.metadata == other.metadata
    }
}
impl LargeBlobRecord {
    /// Resolves a raw large-blob record against the session context, copying
    /// the payload bytes out of the parse buffer. Returns `None` (after
    /// recording a warning on `ctx`) when the large-record type was not
    /// recognized at parse time.
    pub(super) fn resolve(
        ctx: &mut ResolveCtx,
        raw: RawLargeBlobRecord<'_>,
    ) -> Option<Self> {
        let (bytes, metadata) = match raw.payload {
            RawLargeBlobPayload::BytesAndMetadata(bytes, metadata) => {
                (bytes.to_owned(), Some(LargeBlobMetadata::resolve(ctx, metadata)))
            }
            RawLargeBlobPayload::BytesOnly(bytes) => (bytes.to_owned(), None),
            RawLargeBlobPayload::UnknownLargeBlobType { raw_type, .. } => {
                // Unknown record types are skipped rather than failing the session.
                ctx.add_warning(ParseWarning::UnknownLargeBlobType(raw_type));
                return None;
            }
        };
        Some(Self {
            provider: ctx.current_provider(),
            ty: raw.ty,
            category: ctx.resolve_str(raw.category),
            name: ctx.resolve_str(raw.name),
            bytes,
            metadata,
        })
    }
}
/// A large blob record as parsed, still borrowing from the input buffer;
/// resolved into `LargeBlobRecord` by `LargeBlobRecord::resolve`.
pub(super) struct RawLargeBlobRecord<'a> {
    ty: BlobType,
    category: StringRef<'a>,
    name: StringRef<'a>,
    payload: RawLargeBlobPayload<'a>,
}
// Expanded `#[derive(Debug)]` output.
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawLargeBlobRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(
            f,
            "RawLargeBlobRecord",
            "ty",
            &self.ty,
            "category",
            &self.category,
            "name",
            &self.name,
            "payload",
            &&self.payload,
        )
    }
}
// Expanded `#[derive(PartialEq)]` output.
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawLargeBlobRecord<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawLargeBlobRecord<'a> {
    #[inline]
    fn eq(&self, other: &RawLargeBlobRecord<'a>) -> bool {
        self.ty == other.ty && self.category == other.category
            && self.name == other.name && self.payload == other.payload
    }
}
/// The payload of a raw large-blob record, distinguished by the header's
/// large-record type: bytes with or without metadata, or an unrecognized type
/// whose remaining bytes are kept for diagnostics.
enum RawLargeBlobPayload<'a> {
    BytesOnly(&'a [u8]),
    BytesAndMetadata(&'a [u8], RawLargeBlobMetadata<'a>),
    UnknownLargeBlobType { raw_type: u8, remaining_bytes: &'a [u8] },
}
// Expanded `#[derive(Debug)]` output.
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawLargeBlobPayload<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            RawLargeBlobPayload::BytesOnly(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "BytesOnly",
                    &__self_0,
                )
            }
            RawLargeBlobPayload::BytesAndMetadata(__self_0, __self_1) => {
                ::core::fmt::Formatter::debug_tuple_field2_finish(
                    f,
                    "BytesAndMetadata",
                    __self_0,
                    &__self_1,
                )
            }
            RawLargeBlobPayload::UnknownLargeBlobType {
                raw_type: __self_0,
                remaining_bytes: __self_1,
            } => {
                ::core::fmt::Formatter::debug_struct_field2_finish(
                    f,
                    "UnknownLargeBlobType",
                    "raw_type",
                    __self_0,
                    "remaining_bytes",
                    &__self_1,
                )
            }
        }
    }
}
// Expanded `#[derive(PartialEq)]` output.
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawLargeBlobPayload<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawLargeBlobPayload<'a> {
    #[inline]
    fn eq(&self, other: &RawLargeBlobPayload<'a>) -> bool {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (
                    RawLargeBlobPayload::BytesOnly(__self_0),
                    RawLargeBlobPayload::BytesOnly(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    RawLargeBlobPayload::BytesAndMetadata(__self_0, __self_1),
                    RawLargeBlobPayload::BytesAndMetadata(__arg1_0, __arg1_1),
                ) => *__self_0 == *__arg1_0 && *__self_1 == *__arg1_1,
                (
                    RawLargeBlobPayload::UnknownLargeBlobType {
                        raw_type: __self_0,
                        remaining_bytes: __self_1,
                    },
                    RawLargeBlobPayload::UnknownLargeBlobType {
                        raw_type: __arg1_0,
                        remaining_bytes: __arg1_1,
                    },
                ) => *__self_0 == *__arg1_0 && *__self_1 == *__arg1_1,
                // SAFETY (derive-generated): the discriminant equality check
                // above guarantees both values are the same variant here.
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}
impl<'a> RawLargeBlobRecord<'a> {
    /// Parses one large-blob record: the large-record header, then a format
    /// header, category/name strings, optional metadata (depending on the
    /// large-record type), and finally the length-prefixed blob bytes.
    /// Unknown large-record types are returned as an `UnknownLargeBlobType`
    /// payload rather than an error.
    pub(super) fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        let (buf, header) = LargeBlobHeader::parse(buf)?;
        let ty = BlobType::from(header.blob_format_type());
        let (rem, payload) = header.take_payload(buf)?;
        // The first payload word carries the category/name/args/thread refs.
        let (payload, format_header) = nom::combinator::map(
            le_u64,
            LargeBlobFormatHeader,
        )(payload)?;
        let (payload, category) = StringRef::parse(
            format_header.category_ref(),
            payload,
        )?;
        let (payload, name) = StringRef::parse(format_header.name_ref(), payload)?;
        let (payload, metadata) = match header.large_record_type() {
            LARGE_BLOB_WITH_METADATA_TYPE => {
                let (payload, ticks) = Ticks::parse(payload)?;
                // NOTE(review): `thread_ref()` is passed to both ProcessRef and
                // ThreadRef parsing — presumably a single ref covers the
                // (process, thread) pair; confirm against the format spec.
                let (payload, process) = ProcessRef::parse(
                    format_header.thread_ref(),
                    payload,
                )?;
                let (payload, thread) = ThreadRef::parse(
                    format_header.thread_ref(),
                    payload,
                )?;
                let (payload, args) = RawArg::parse_n(
                    format_header.num_args(),
                    payload,
                )?;
                (
                    payload,
                    Some(RawLargeBlobMetadata {
                        ticks,
                        process,
                        thread,
                        args,
                    }),
                )
            }
            LARGE_BLOB_NO_METADATA_TYPE => (payload, None),
            unknown => {
                // Keep the unconsumed bytes so resolve() can warn and skip.
                let payload = RawLargeBlobPayload::UnknownLargeBlobType {
                    raw_type: unknown,
                    remaining_bytes: payload,
                };
                return Ok((
                    rem,
                    Self {
                        ty,
                        category,
                        name,
                        payload,
                    },
                ));
            }
        };
        // Blob bytes: a u64 length prefix followed by padded data that must
        // consume the rest of the record payload exactly.
        let (payload, blob_size) = le_u64(payload)?;
        let (empty, bytes) = all_consuming(|p| take_n_padded(
            blob_size as usize,
            p,
        ))(payload)?;
        // Expansion of `assert_eq!(empty, [], "...")`: all_consuming already
        // guarantees no trailing bytes, so this is an internal sanity check.
        match (&empty, &[]) {
            (left_val, right_val) => {
                if !(*left_val == *right_val) {
                    let kind = ::core::panicking::AssertKind::Eq;
                    ::core::panicking::assert_failed(
                        kind,
                        &*left_val,
                        &*right_val,
                        ::core::option::Option::Some(
                            format_args!(
                                "all_consuming must not return any trailing bytes",
                            ),
                        ),
                    );
                }
            }
        };
        let payload = if let Some(metadata) = metadata {
            RawLargeBlobPayload::BytesAndMetadata(bytes, metadata)
        } else {
            RawLargeBlobPayload::BytesOnly(bytes)
        };
        Ok((
            rem,
            Self {
                ty,
                category,
                name,
                payload,
            },
        ))
    }
}
/// Resolved metadata attached to a large blob record (present only for the
/// with-metadata large-record type).
pub struct LargeBlobMetadata {
    pub timestamp: i64,
    pub process: ProcessKoid,
    pub thread: ThreadKoid,
    pub args: Vec<Arg>,
}
// Expanded `#[derive(Clone)]` output.
#[automatically_derived]
impl ::core::clone::Clone for LargeBlobMetadata {
    #[inline]
    fn clone(&self) -> LargeBlobMetadata {
        LargeBlobMetadata {
            timestamp: ::core::clone::Clone::clone(&self.timestamp),
            process: ::core::clone::Clone::clone(&self.process),
            thread: ::core::clone::Clone::clone(&self.thread),
            args: ::core::clone::Clone::clone(&self.args),
        }
    }
}
// Expanded `#[derive(Debug)]` output.
#[automatically_derived]
impl ::core::fmt::Debug for LargeBlobMetadata {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(
            f,
            "LargeBlobMetadata",
            "timestamp",
            &self.timestamp,
            "process",
            &self.process,
            "thread",
            &self.thread,
            "args",
            &&self.args,
        )
    }
}
// Expanded `#[derive(PartialEq)]` output.
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for LargeBlobMetadata {}
#[automatically_derived]
impl ::core::cmp::PartialEq for LargeBlobMetadata {
    #[inline]
    fn eq(&self, other: &LargeBlobMetadata) -> bool {
        self.timestamp == other.timestamp && self.process == other.process
            && self.thread == other.thread && self.args == other.args
    }
}
impl LargeBlobMetadata {
    /// Converts raw on-the-wire metadata into its resolved form, looking up
    /// ticks, process, thread, and argument references through `ctx`.
    fn resolve(ctx: &mut ResolveCtx, raw: RawLargeBlobMetadata<'_>) -> Self {
        // Resolve in field order so any warnings recorded on `ctx` come out
        // in the same deterministic sequence as before.
        let timestamp = ctx.resolve_ticks(raw.ticks);
        let process = ctx.resolve_process(raw.process);
        let thread = ctx.resolve_thread(raw.thread);
        let args = Arg::resolve_n(ctx, raw.args);
        Self { timestamp, process, thread, args }
    }
}
/// Unresolved large-blob metadata, still borrowing arg strings from the input.
pub(super) struct RawLargeBlobMetadata<'a> {
    ticks: Ticks,
    process: ProcessRef,
    thread: ThreadRef,
    args: Vec<RawArg<'a>>,
}
// Expanded `#[derive(Debug)]` output.
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawLargeBlobMetadata<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(
            f,
            "RawLargeBlobMetadata",
            "ticks",
            &self.ticks,
            "process",
            &self.process,
            "thread",
            &self.thread,
            "args",
            &&self.args,
        )
    }
}
// Expanded `#[derive(PartialEq)]` output.
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawLargeBlobMetadata<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawLargeBlobMetadata<'a> {
    #[inline]
    fn eq(&self, other: &RawLargeBlobMetadata<'a>) -> bool {
        self.ticks == other.ticks && self.process == other.process
            && self.thread == other.thread && self.args == other.args
    }
}
/// Bitfield wrapper over the 64-bit large-record header word.
pub struct LargeBlobHeader(u64);
// Expanded `#[derive(Clone)]`; `LargeBlobHeader` is `Copy`, so clone is a copy.
#[automatically_derived]
impl ::core::clone::Clone for LargeBlobHeader {
    #[inline]
    fn clone(&self) -> LargeBlobHeader {
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for LargeBlobHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for LargeBlobHeader {}
// Expanded `#[derive(Eq)]` output.
#[automatically_derived]
impl ::core::cmp::Eq for LargeBlobHeader {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for LargeBlobHeader {}
// Expanded `#[derive(PartialEq)]` output.
#[automatically_derived]
impl ::core::cmp::PartialEq for LargeBlobHeader {
    #[inline]
    fn eq(&self, other: &LargeBlobHeader) -> bool {
        self.0 == other.0
    }
}
// `bitfield` macro plumbing: delegate bit-range reads/writes to the inner u64.
impl<T> ::bitfield::BitRange<T> for LargeBlobHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
impl<T> ::bitfield::BitRangeMut<T> for LargeBlobHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
// Macro-generated Debug: raw word plus each decoded bitfield accessor.
impl std::fmt::Debug for LargeBlobHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        let mut debug_struct = f.debug_struct("LargeBlobHeader");
        debug_struct.field(".0", &self.0);
        debug_struct.field("raw_type", &self.raw_type());
        debug_struct.field("size_words", &self.size_words());
        debug_struct.field("large_record_type", &self.large_record_type());
        debug_struct.field("blob_format_type", &self.blob_format_type());
        debug_struct.finish()
    }
}
impl LargeBlobHeader {
    /// Record type ordinal, bits 3..=0 of the header word.
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    /// Total record size in 8-byte words (header included), bits 35..=4 —
    /// wider than small records, hence the u32 return.
    pub fn size_words(&self) -> u32 {
        use ::bitfield::BitRange;
        let raw_value: u32 = self.bit_range(35, 4);
        ::bitfield::Into::into(raw_value)
    }
    /// Large-record subtype (with/without metadata), bits 39..=36.
    pub fn large_record_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(39, 36);
        ::bitfield::Into::into(raw_value)
    }
    /// Blob format type (see `BlobType`), bits 43..=40.
    pub fn blob_format_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(43, 40);
        ::bitfield::Into::into(raw_value)
    }
    // Setters mirror the getters above, writing the same bit ranges.
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u32) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(35, 4, ::bitfield::Into::<u32>::into(value));
    }
    pub fn set_large_record_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(39, 36, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_blob_format_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(43, 40, ::bitfield::Into::<u8>::into(value));
    }
    /// An all-zero header with only the record-type field populated.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(LARGE_RECORD_TYPE);
        header
    }
    /// Validates that `bits` carries the large-record type ordinal.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != LARGE_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "LargeBlobHeader",
                expected: LARGE_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        // Extra-validation hook generated by the trace_header! macro; a no-op here.
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    /// Reads one little-endian u64 from `buf` and validates it as a header.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    /// Splits `buf` into (remainder, payload) where the payload holds this
    /// record's size_words-1 words (the header word was already consumed).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        // size_words counts the header itself, so zero is malformed.
        if self.size_words() == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
// trace_header! macro glue: the generic header interface used when writing records.
impl crate::header::TraceHeader for LargeBlobHeader {
    fn set_size_words(&mut self, n: u16) {
        // u16 -> u32 widening conversion; this try_into cannot fail.
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
// `bitfield` macro plumbing for the format word: delegate to the inner u64.
impl<T> ::bitfield::BitRange<T> for LargeBlobFormatHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
impl<T> ::bitfield::BitRangeMut<T> for LargeBlobFormatHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
// Macro-generated Debug: raw word plus each decoded bitfield accessor.
impl ::bitfield::fmt::Debug for LargeBlobFormatHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        let mut debug_struct = f.debug_struct("LargeBlobFormatHeader");
        debug_struct.field(".0", &self.0);
        debug_struct.field("category_ref", &self.category_ref());
        debug_struct.field("name_ref", &self.name_ref());
        debug_struct.field("num_args", &self.num_args());
        debug_struct.field("thread_ref", &self.thread_ref());
        debug_struct.finish()
    }
}
/// The first payload word of a large blob record: category/name string refs,
/// argument count, and thread ref.
struct LargeBlobFormatHeader(pub u64);
impl LargeBlobFormatHeader {
    /// Category string reference, bits 15..=0.
    fn category_ref(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 0);
        ::bitfield::Into::into(raw_value)
    }
    /// Name string reference, bits 31..=16.
    fn name_ref(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(31, 16);
        ::bitfield::Into::into(raw_value)
    }
    /// Number of trailing argument records, bits 35..=32.
    fn num_args(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(35, 32);
        ::bitfield::Into::into(raw_value)
    }
    /// Thread table reference, bits 43..=36.
    fn thread_ref(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(43, 36);
        ::bitfield::Into::into(raw_value)
    }
    // Setters mirror the getters above, writing the same bit ranges.
    fn set_category_ref(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 0, ::bitfield::Into::<u16>::into(value));
    }
    fn set_name_ref(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(31, 16, ::bitfield::Into::<u16>::into(value));
    }
    fn set_num_args(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(35, 32, ::bitfield::Into::<u8>::into(value));
    }
    fn set_thread_ref(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(43, 36, ::bitfield::Into::<u8>::into(value));
    }
}
}
mod error {
use crate::thread::{ProcessRef, ThreadRef};
use flyweights::FlyStr;
use std::num::NonZeroU16;
/// Result alias used by all record parsers: unconsumed input + value, or `ParseError`.
pub(crate) type ParseResult<'a, T> = nom::IResult<&'a [u8], T, ParseError>;
// thiserror derive input as captured by cargo expand; the `#[error]` helper
// attributes remain on the variants and feed the Display impl below.
pub enum ParseError {
    /// We encountered a generic `nom` error while parsing.
    #[error("nom parsing error: {1:?}")]
    Nom(nom::error::ErrorKind, #[source] Option<Box<Self>>),
    /// We encountered an error performing I/O to read the trace session.
    #[error("failure reading the trace session")]
    Io(#[source] std::io::Error),
    /// We encountered invalid UTF-8 while parsing.
    #[error("couldn't parse string as utf-8")]
    InvalidUtf8(#[from] #[source] std::str::Utf8Error),
    /// We encountered a non-magic-number record at the beginning of the session.
    #[error("trace session didn't start with the magic number record")]
    MissingMagicNumber,
    /// We encountered an unexpected type ordinal while parsing.
    #[error("expected type {expected} for {context}, observed type {observed}")]
    WrongType { expected: u8, observed: u8, context: &'static str },
    /// We encountered an incorrect magic number while parsing.
    #[error("got the wrong magic number: {observed}")]
    InvalidMagicNumber { observed: u32 },
    /// We encountered an invalid reference, like a zero thread id.
    #[error("got an invalid ref")]
    InvalidRef,
    /// We encountered an invalid length for a record.
    #[error("invalid length prefix encountered")]
    InvalidSize,
}
// Expanded `#[derive(Debug)]` output for ParseError.
#[automatically_derived]
impl ::core::fmt::Debug for ParseError {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            ParseError::Nom(__self_0, __self_1) => {
                ::core::fmt::Formatter::debug_tuple_field2_finish(
                    f,
                    "Nom",
                    __self_0,
                    &__self_1,
                )
            }
            ParseError::Io(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Io", &__self_0)
            }
            ParseError::InvalidUtf8(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "InvalidUtf8",
                    &__self_0,
                )
            }
            ParseError::MissingMagicNumber => {
                ::core::fmt::Formatter::write_str(f, "MissingMagicNumber")
            }
            ParseError::WrongType {
                expected: __self_0,
                observed: __self_1,
                context: __self_2,
            } => {
                ::core::fmt::Formatter::debug_struct_field3_finish(
                    f,
                    "WrongType",
                    "expected",
                    __self_0,
                    "observed",
                    __self_1,
                    "context",
                    &__self_2,
                )
            }
            ParseError::InvalidMagicNumber { observed: __self_0 } => {
                ::core::fmt::Formatter::debug_struct_field1_finish(
                    f,
                    "InvalidMagicNumber",
                    "observed",
                    &__self_0,
                )
            }
            ParseError::InvalidRef => {
                ::core::fmt::Formatter::write_str(f, "InvalidRef")
            }
            ParseError::InvalidSize => {
                ::core::fmt::Formatter::write_str(f, "InvalidSize")
            }
        }
    }
}
// thiserror-generated Error impl: `source()` follows the `#[source]` fields.
#[allow(unused_qualifications)]
impl std::error::Error for ParseError {
    fn source(&self) -> std::option::Option<&(dyn std::error::Error + 'static)> {
        use thiserror::private::AsDynError;
        #[allow(deprecated)]
        match self {
            // Nom's source is optional; `?` propagates None when absent.
            ParseError::Nom { 1: source, .. } => {
                std::option::Option::Some(source.as_ref()?.as_dyn_error())
            }
            ParseError::Io { 0: source, .. } => {
                std::option::Option::Some(source.as_dyn_error())
            }
            ParseError::InvalidUtf8 { 0: source, .. } => {
                std::option::Option::Some(source.as_dyn_error())
            }
            ParseError::MissingMagicNumber { .. } => std::option::Option::None,
            ParseError::WrongType { .. } => std::option::Option::None,
            ParseError::InvalidMagicNumber { .. } => std::option::Option::None,
            ParseError::InvalidRef { .. } => std::option::Option::None,
            ParseError::InvalidSize { .. } => std::option::Option::None,
        }
    }
}
// thiserror-generated Display impl: renders each variant's `#[error("...")]` string.
#[allow(unused_qualifications)]
impl std::fmt::Display for ParseError {
    fn fmt(&self, __formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        #[allow(unused_imports)]
        use thiserror::private::{DisplayAsDisplay, PathAsDisplay};
        #[allow(unused_variables, deprecated, clippy::used_underscore_binding)]
        match self {
            ParseError::Nom(_0, _1) => {
                __formatter.write_fmt(format_args!("nom parsing error: {0:?}", _1))
            }
            ParseError::Io(_0) => {
                __formatter
                    .write_fmt(format_args!("failure reading the trace session"))
            }
            ParseError::InvalidUtf8(_0) => {
                __formatter
                    .write_fmt(format_args!("couldn\'t parse string as utf-8"))
            }
            ParseError::MissingMagicNumber {} => {
                __formatter
                    .write_fmt(
                        format_args!(
                            "trace session didn\'t start with the magic number record",
                        ),
                    )
            }
            ParseError::WrongType { expected, observed, context } => {
                __formatter
                    .write_fmt(
                        format_args!(
                            "expected type {0} for {1}, observed type {2}",
                            expected.as_display(),
                            context.as_display(),
                            observed.as_display(),
                        ),
                    )
            }
            ParseError::InvalidMagicNumber { observed } => {
                __formatter
                    .write_fmt(
                        format_args!(
                            "got the wrong magic number: {0}",
                            observed.as_display(),
                        ),
                    )
            }
            ParseError::InvalidRef {} => {
                __formatter.write_fmt(format_args!("got an invalid ref"))
            }
            ParseError::InvalidSize {} => {
                __formatter
                    .write_fmt(format_args!("invalid length prefix encountered"))
            }
        }
    }
}
#[allow(unused_qualifications)]
impl std::convert::From<std::str::Utf8Error> for ParseError {
    /// Wraps a UTF-8 decoding failure in `ParseError::InvalidUtf8`
    /// (generated from the variant's `#[from]` attribute).
    #[allow(deprecated)]
    fn from(source: std::str::Utf8Error) -> Self {
        // Tuple-variant constructor syntax; equivalent to the expanded
        // `InvalidUtf8 { 0: source }` form the derive emits.
        ParseError::InvalidUtf8(source)
    }
}
// Lets nom combinators produce our error type; errors chain through `append`.
impl nom::error::ParseError<&[u8]> for ParseError {
    fn from_error_kind(_input: &[u8], kind: nom::error::ErrorKind) -> Self {
        ParseError::Nom(kind, None)
    }
    fn append(_input: &[u8], kind: nom::error::ErrorKind, prev: Self) -> Self {
        // Box the previous error so the chain is visible via `source()`.
        ParseError::Nom(kind, Some(Box::new(prev)))
    }
}
/// Scenarios encountered during parsing that didn't prevent parsing from succeeding but which
/// might affect analysis of the session.
// thiserror derive input as captured by cargo expand; the `#[error]` helper
// attributes feed the Display impl below.
pub enum ParseWarning {
    #[error("encountered unknown thread reference `{_0:?}`")]
    UnknownThreadRef(ThreadRef),
    #[error("encountered unknown process reference `{_0:?}`")]
    UnknownProcessRef(ProcessRef),
    #[error("encountered unknown trace record type {_0}")]
    UnknownTraceRecordType(u8),
    #[error("skipped arg '{name}' because of unknown type")]
    SkippingArgWithUnknownType { name: FlyStr },
    #[error("encountered unknown provider id {_0}")]
    UnknownProviderId(u32),
    #[error("encountered unknown string id {_0}")]
    UnknownStringId(NonZeroU16),
    #[error("encountered an empty string record")]
    RecordForZeroStringId,
    #[error("encountered unknown large blob type {_0}")]
    UnknownLargeBlobType(u8),
    #[error("encountered unknown metadata record type {_0}")]
    UnknownMetadataRecordType(u8),
    #[error("encountered unknown scheduling record type {_0}")]
    UnknownSchedulingRecordType(u8),
}
// Expanded `#[derive(Clone)]` output for ParseWarning.
#[automatically_derived]
impl ::core::clone::Clone for ParseWarning {
    #[inline]
    fn clone(&self) -> ParseWarning {
        match self {
            ParseWarning::UnknownThreadRef(__self_0) => {
                ParseWarning::UnknownThreadRef(::core::clone::Clone::clone(__self_0))
            }
            ParseWarning::UnknownProcessRef(__self_0) => {
                ParseWarning::UnknownProcessRef(
                    ::core::clone::Clone::clone(__self_0),
                )
            }
            ParseWarning::UnknownTraceRecordType(__self_0) => {
                ParseWarning::UnknownTraceRecordType(
                    ::core::clone::Clone::clone(__self_0),
                )
            }
            ParseWarning::SkippingArgWithUnknownType { name: __self_0 } => {
                ParseWarning::SkippingArgWithUnknownType {
                    name: ::core::clone::Clone::clone(__self_0),
                }
            }
            ParseWarning::UnknownProviderId(__self_0) => {
                ParseWarning::UnknownProviderId(
                    ::core::clone::Clone::clone(__self_0),
                )
            }
            ParseWarning::UnknownStringId(__self_0) => {
                ParseWarning::UnknownStringId(::core::clone::Clone::clone(__self_0))
            }
            ParseWarning::RecordForZeroStringId => {
                ParseWarning::RecordForZeroStringId
            }
            ParseWarning::UnknownLargeBlobType(__self_0) => {
                ParseWarning::UnknownLargeBlobType(
                    ::core::clone::Clone::clone(__self_0),
                )
            }
            ParseWarning::UnknownMetadataRecordType(__self_0) => {
                ParseWarning::UnknownMetadataRecordType(
                    ::core::clone::Clone::clone(__self_0),
                )
            }
            ParseWarning::UnknownSchedulingRecordType(__self_0) => {
                ParseWarning::UnknownSchedulingRecordType(
                    ::core::clone::Clone::clone(__self_0),
                )
            }
        }
    }
}
// Expanded `#[derive(Debug)]` output for ParseWarning.
#[automatically_derived]
impl ::core::fmt::Debug for ParseWarning {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            ParseWarning::UnknownThreadRef(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UnknownThreadRef",
                    &__self_0,
                )
            }
            ParseWarning::UnknownProcessRef(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UnknownProcessRef",
                    &__self_0,
                )
            }
            ParseWarning::UnknownTraceRecordType(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UnknownTraceRecordType",
                    &__self_0,
                )
            }
            ParseWarning::SkippingArgWithUnknownType { name: __self_0 } => {
                ::core::fmt::Formatter::debug_struct_field1_finish(
                    f,
                    "SkippingArgWithUnknownType",
                    "name",
                    &__self_0,
                )
            }
            ParseWarning::UnknownProviderId(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UnknownProviderId",
                    &__self_0,
                )
            }
            ParseWarning::UnknownStringId(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UnknownStringId",
                    &__self_0,
                )
            }
            ParseWarning::RecordForZeroStringId => {
                ::core::fmt::Formatter::write_str(f, "RecordForZeroStringId")
            }
            ParseWarning::UnknownLargeBlobType(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UnknownLargeBlobType",
                    &__self_0,
                )
            }
            ParseWarning::UnknownMetadataRecordType(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UnknownMetadataRecordType",
                    &__self_0,
                )
            }
            ParseWarning::UnknownSchedulingRecordType(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UnknownSchedulingRecordType",
                    &__self_0,
                )
            }
        }
    }
}
// thiserror-generated Error impl; no variant has a `#[source]`, so it's empty.
#[allow(unused_qualifications)]
impl std::error::Error for ParseWarning {}
// thiserror-generated Display impl: renders each variant's `#[error("...")]` string.
#[allow(unused_qualifications)]
impl std::fmt::Display for ParseWarning {
    fn fmt(&self, __formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        #[allow(unused_imports)]
        use thiserror::private::{DisplayAsDisplay, PathAsDisplay};
        #[allow(unused_variables, deprecated, clippy::used_underscore_binding)]
        match self {
            ParseWarning::UnknownThreadRef(_0) => {
                __formatter
                    .write_fmt(
                        format_args!(
                            "encountered unknown thread reference `{0:?}`",
                            _0,
                        ),
                    )
            }
            ParseWarning::UnknownProcessRef(_0) => {
                __formatter
                    .write_fmt(
                        format_args!(
                            "encountered unknown process reference `{0:?}`",
                            _0,
                        ),
                    )
            }
            ParseWarning::UnknownTraceRecordType(_0) => {
                __formatter
                    .write_fmt(
                        format_args!("encountered unknown trace record type {0}", _0),
                    )
            }
            ParseWarning::SkippingArgWithUnknownType { name } => {
                __formatter
                    .write_fmt(
                        format_args!(
                            "skipped arg \'{0}\' because of unknown type",
                            name.as_display(),
                        ),
                    )
            }
            ParseWarning::UnknownProviderId(_0) => {
                __formatter
                    .write_fmt(
                        format_args!("encountered unknown provider id {0}", _0),
                    )
            }
            ParseWarning::UnknownStringId(_0) => {
                __formatter
                    .write_fmt(format_args!("encountered unknown string id {0}", _0))
            }
            ParseWarning::RecordForZeroStringId {} => {
                __formatter
                    .write_fmt(format_args!("encountered an empty string record"))
            }
            ParseWarning::UnknownLargeBlobType(_0) => {
                __formatter
                    .write_fmt(
                        format_args!("encountered unknown large blob type {0}", _0),
                    )
            }
            ParseWarning::UnknownMetadataRecordType(_0) => {
                __formatter
                    .write_fmt(
                        format_args!(
                            "encountered unknown metadata record type {0}",
                            _0,
                        ),
                    )
            }
            ParseWarning::UnknownSchedulingRecordType(_0) => {
                __formatter
                    .write_fmt(
                        format_args!(
                            "encountered unknown scheduling record type {0}",
                            _0,
                        ),
                    )
            }
        }
    }
}
// Expanded `#[derive(PartialEq)]` output for ParseWarning.
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ParseWarning {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ParseWarning {
    #[inline]
    fn eq(&self, other: &ParseWarning) -> bool {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (
                    ParseWarning::UnknownThreadRef(__self_0),
                    ParseWarning::UnknownThreadRef(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    ParseWarning::UnknownProcessRef(__self_0),
                    ParseWarning::UnknownProcessRef(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    ParseWarning::UnknownTraceRecordType(__self_0),
                    ParseWarning::UnknownTraceRecordType(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    ParseWarning::SkippingArgWithUnknownType { name: __self_0 },
                    ParseWarning::SkippingArgWithUnknownType { name: __arg1_0 },
                ) => *__self_0 == *__arg1_0,
                (
                    ParseWarning::UnknownProviderId(__self_0),
                    ParseWarning::UnknownProviderId(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    ParseWarning::UnknownStringId(__self_0),
                    ParseWarning::UnknownStringId(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    ParseWarning::UnknownLargeBlobType(__self_0),
                    ParseWarning::UnknownLargeBlobType(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    ParseWarning::UnknownMetadataRecordType(__self_0),
                    ParseWarning::UnknownMetadataRecordType(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    ParseWarning::UnknownSchedulingRecordType(__self_0),
                    ParseWarning::UnknownSchedulingRecordType(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                // Covers the fieldless RecordForZeroStringId variant, whose
                // equality is fully decided by the discriminant check above.
                _ => true,
            }
    }
}
}
mod event {
use crate::{
args::{Arg, RawArg, RawArgValue},
fxt_builder::FxtBuilder, init::Ticks, session::ResolveCtx,
string::{StringRef, STRING_REF_INLINE_BIT},
thread::{ProcessKoid, ProcessRef, ThreadKoid, ThreadRef},
trace_header, ParseResult, Provider, EVENT_RECORD_TYPE,
};
use flyweights::FlyStr;
use nom::number::complete::le_u64;
// Event-type codes carried in the event record header (read/written by
// `EventHeader::event_type` / `set_event_type` below). Each selects one
// `EventPayload` variant in `EventPayload::parse`.
pub(crate) const INSTANT_EVENT_TYPE: u8 = 0;
pub(crate) const COUNTER_EVENT_TYPE: u8 = 1;
pub(crate) const DURATION_BEGIN_EVENT_TYPE: u8 = 2;
pub(crate) const DURATION_END_EVENT_TYPE: u8 = 3;
pub(crate) const DURATION_COMPLETE_EVENT_TYPE: u8 = 4;
pub(crate) const ASYNC_BEGIN_EVENT_TYPE: u8 = 5;
pub(crate) const ASYNC_INSTANT_EVENT_TYPE: u8 = 6;
pub(crate) const ASYNC_END_EVENT_TYPE: u8 = 7;
pub(crate) const FLOW_BEGIN_EVENT_TYPE: u8 = 8;
pub(crate) const FLOW_STEP_EVENT_TYPE: u8 = 9;
pub(crate) const FLOW_END_EVENT_TYPE: u8 = 10;
/// Returns a copy of `raw_record` in which every unsigned-64 argument whose
/// value equals `ordinal` is replaced by an inline string argument named
/// "method" carrying `method`. All other arguments are cloned unchanged.
pub fn symbolize<'a>(
    ordinal: u64,
    method: &'a str,
    raw_record: &RawEventRecord<'a>,
) -> RawEventRecord<'a> {
    let new_args = raw_record
        .args
        .iter()
        .map(|arg| match arg.value {
            // Matching ordinal: substitute the symbolized method name.
            RawArgValue::Unsigned64(value) if value == ordinal => RawArg {
                name: StringRef::Inline("method"),
                value: RawArgValue::String(StringRef::Inline(method)),
            },
            _ => arg.clone(),
        })
        .collect();
    RawEventRecord {
        event_type: raw_record.event_type,
        ticks: raw_record.ticks.clone(),
        process: raw_record.process.clone(),
        thread: raw_record.thread.clone(),
        category: raw_record.category.clone(),
        name: raw_record.name.clone(),
        args: new_args,
        payload: raw_record.payload.clone(),
    }
}
/// A fully-resolved event record: all index references replaced by owned
/// values and the timestamp converted to a signed integer.
pub struct EventRecord {
    pub provider: Option<Provider>,
    pub timestamp: i64,
    pub process: ProcessKoid,
    pub thread: ThreadKoid,
    pub category: FlyStr,
    pub name: FlyStr,
    pub args: Vec<Arg>,
    pub payload: EventPayload<i64>,
}
impl ::core::clone::Clone for EventRecord {
    #[inline]
    fn clone(&self) -> EventRecord {
        EventRecord {
            provider: self.provider.clone(),
            timestamp: self.timestamp.clone(),
            process: self.process.clone(),
            thread: self.thread.clone(),
            category: self.category.clone(),
            name: self.name.clone(),
            args: self.args.clone(),
            payload: self.payload.clone(),
        }
    }
}
impl ::core::fmt::Debug for EventRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        f.debug_struct("EventRecord")
            .field("provider", &self.provider)
            .field("timestamp", &self.timestamp)
            .field("process", &self.process)
            .field("thread", &self.thread)
            .field("category", &self.category)
            .field("name", &self.name)
            .field("args", &self.args)
            .field("payload", &self.payload)
            .finish()
    }
}
impl ::core::marker::StructuralPartialEq for EventRecord {}
impl ::core::cmp::PartialEq for EventRecord {
    #[inline]
    fn eq(&self, other: &EventRecord) -> bool {
        self.provider == other.provider
            && self.timestamp == other.timestamp
            && self.process == other.process
            && self.thread == other.thread
            && self.category == other.category
            && self.name == other.name
            && self.args == other.args
            && self.payload == other.payload
    }
}
impl EventRecord {
    /// Resolves a borrowed, index-based raw record into an owned record using
    /// the session state accumulated in `ctx` (current provider, tick rate,
    /// process/thread/string tables).
    pub(super) fn resolve(ctx: &mut ResolveCtx, raw: RawEventRecord<'_>) -> Self {
        Self {
            provider: ctx.current_provider(),
            timestamp: ctx.resolve_ticks(raw.ticks),
            process: ctx.resolve_process(raw.process),
            thread: ctx.resolve_thread(raw.thread),
            category: ctx.resolve_str(raw.category),
            name: ctx.resolve_str(raw.name),
            args: Arg::resolve_n(ctx, raw.args),
            payload: raw.payload.resolve(ctx),
        }
    }
}
/// An event record as parsed off the wire: strings and thread references may
/// still be indices into the session's interning tables, borrowed from `buf`.
pub struct RawEventRecord<'a> {
    event_type: u8,
    ticks: Ticks,
    process: ProcessRef,
    thread: ThreadRef,
    category: StringRef<'a>,
    name: StringRef<'a>,
    pub args: Vec<RawArg<'a>>,
    payload: EventPayload<Ticks>,
}
impl<'a> ::core::fmt::Debug for RawEventRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        f.debug_struct("RawEventRecord")
            .field("event_type", &self.event_type)
            .field("ticks", &self.ticks)
            .field("process", &self.process)
            .field("thread", &self.thread)
            .field("category", &self.category)
            .field("name", &self.name)
            .field("args", &self.args)
            .field("payload", &self.payload)
            .finish()
    }
}
impl<'a> ::core::marker::StructuralPartialEq for RawEventRecord<'a> {}
impl<'a> ::core::cmp::PartialEq for RawEventRecord<'a> {
    #[inline]
    fn eq(&self, other: &RawEventRecord<'a>) -> bool {
        self.event_type == other.event_type
            && self.ticks == other.ticks
            && self.process == other.process
            && self.thread == other.thread
            && self.category == other.category
            && self.name == other.name
            && self.args == other.args
            && self.payload == other.payload
    }
}
impl<'a> RawEventRecord<'a> {
    /// Parses one event record from `buf`, returning the bytes after the
    /// record and the borrowed (unresolved) record.
    ///
    /// Field order mirrors the wire layout: header word, timestamp,
    /// process/thread refs, category, name, args, then a type-specific payload.
    pub fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        let (buf, header) = EventHeader::parse(buf)?;
        // `payload` is this record's own bytes; `rem` is the rest of the trace.
        let (rem, payload) = header.take_payload(buf)?;
        let event_type = header.event_type();
        let (payload, ticks) = Ticks::parse(payload)?;
        // NOTE(review): both refs are driven by `header.thread_ref()` — the
        // single thread-ref field appears to govern the process/thread pair;
        // confirm against the FXT spec.
        let (payload, process) = ProcessRef::parse(header.thread_ref(), payload)?;
        let (payload, thread) = ThreadRef::parse(header.thread_ref(), payload)?;
        let (payload, category) = StringRef::parse(header.category_ref(), payload)?;
        let (payload, name) = StringRef::parse(header.name_ref(), payload)?;
        let (payload, args) = RawArg::parse_n(header.num_args(), payload)?;
        let (_empty, payload) = EventPayload::parse(event_type, payload)?;
        Ok((
            rem,
            Self {
                event_type,
                ticks,
                process,
                thread,
                category,
                name,
                args,
                payload,
            },
        ))
    }
    /// Builds the header word for this record (type, arg count, and the
    /// thread/category/name reference encodings). The size-in-words field is
    /// filled in later by `FxtBuilder` during serialization.
    pub fn make_header(&self) -> EventHeader {
        let mut header = EventHeader::empty();
        header.set_event_type(self.event_type);
        header.set_num_args(self.args.len() as u8);
        if let ProcessRef::Index(id) = self.process {
            header.set_thread_ref(id.into());
        }
        // Inline strings encode their byte length with the inline bit set;
        // indexed strings store the string-table id; empty strings encode 0.
        let category_ref: u16 = match self.category {
            StringRef::Index(id) => id.into(),
            StringRef::Inline(category_stream) => {
                category_stream.len() as u16 | STRING_REF_INLINE_BIT
            }
            StringRef::Empty => 0u16,
        };
        header.set_category_ref(category_ref);
        let name_ref: u16 = match self.name {
            StringRef::Index(id) => id.into(),
            StringRef::Inline(name_stream) => {
                name_stream.len() as u16 | STRING_REF_INLINE_BIT
            }
            StringRef::Empty => 0u16,
        };
        header.set_name_ref(name_ref);
        header
    }
    /// Serializes the record back to FXT bytes. Only inline values occupy
    /// payload words; indexed references live entirely in the header.
    pub fn serialize(&self) -> Result<Vec<u8>, String> {
        let mut event_record = FxtBuilder::new(self.make_header());
        event_record = event_record.atom(self.ticks.0.to_le_bytes());
        if let ProcessRef::Inline(process_koid) = self.process {
            event_record = event_record.atom(process_koid.0.to_le_bytes());
        }
        if let ThreadRef::Inline(thread_koid) = self.thread {
            event_record = event_record.atom(thread_koid.0.to_le_bytes());
        }
        if let StringRef::Inline(category_stream) = self.category {
            event_record = event_record.atom(category_stream);
        }
        if let StringRef::Inline(name_stream) = self.name {
            event_record = event_record.atom(name_stream);
        }
        for arg in &self.args {
            event_record = event_record.atom(arg.serialize()?);
        }
        // Trailing payload word(s) depend on the event type.
        match &self.payload {
            EventPayload::Instant
            | EventPayload::DurationBegin
            | EventPayload::DurationEnd => {}
            EventPayload::Counter { id }
            | EventPayload::AsyncBegin { id }
            | EventPayload::AsyncInstant { id }
            | EventPayload::AsyncEnd { id }
            | EventPayload::FlowBegin { id }
            | EventPayload::FlowStep { id }
            | EventPayload::FlowEnd { id } => {
                event_record = event_record.atom(id.to_le_bytes());
            }
            EventPayload::DurationComplete { end_timestamp } => {
                event_record = event_record.atom(end_timestamp.0.to_le_bytes());
            }
            EventPayload::Unknown { raw_type: _, bytes } => {
                event_record = event_record.atom(bytes);
            }
        }
        Ok(event_record.build())
    }
}
/// Bit-packed 64-bit header word for an event record.
pub struct EventHeader(u64);
impl ::core::clone::Clone for EventHeader {
    #[inline]
    fn clone(&self) -> EventHeader {
        // `EventHeader` is `Copy`; cloning is a bitwise copy.
        *self
    }
}
impl ::core::marker::Copy for EventHeader {}
impl ::core::marker::StructuralEq for EventHeader {}
impl ::core::cmp::Eq for EventHeader {}
impl ::core::marker::StructuralPartialEq for EventHeader {}
impl ::core::cmp::PartialEq for EventHeader {
    #[inline]
    fn eq(&self, other: &EventHeader) -> bool {
        self.0 == other.0
    }
}
// `bitfield` crate plumbing: forward bit-range reads and writes to the inner
// `u64` so the field accessors in `impl EventHeader` can address sub-ranges.
impl<T> ::bitfield::BitRange<T> for EventHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
impl<T> ::bitfield::BitRangeMut<T> for EventHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for EventHeader {
    /// Shows the raw word alongside each decoded bitfield.
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        f.debug_struct("EventHeader")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("event_type", &self.event_type())
            .field("num_args", &self.num_args())
            .field("thread_ref", &self.thread_ref())
            .field("category_ref", &self.category_ref())
            .field("name_ref", &self.name_ref())
            .finish()
    }
}
impl EventHeader {
    // Bit layout (LSB first): [3:0] record type, [15:4] size in words,
    // [19:16] event type, [23:20] arg count, [31:24] thread ref,
    // [47:32] category ref, [63:48] name ref.
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    pub fn event_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(19, 16);
        ::bitfield::Into::into(raw_value)
    }
    pub fn num_args(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(23, 20);
        ::bitfield::Into::into(raw_value)
    }
    pub fn thread_ref(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(31, 24);
        ::bitfield::Into::into(raw_value)
    }
    pub fn category_ref(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(47, 32);
        ::bitfield::Into::into(raw_value)
    }
    pub fn name_ref(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(63, 48);
        ::bitfield::Into::into(raw_value)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_event_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(19, 16, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_num_args(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(23, 20, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_thread_ref(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(31, 24, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_category_ref(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(47, 32, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(63, 48, ::bitfield::Into::<u16>::into(value));
    }
    /// A header with only the record type set; the size field is filled in by
    /// `FxtBuilder` during serialization.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(EVENT_RECORD_TYPE);
        header
    }
    /// Wraps `bits`, rejecting any word whose type field is not the event
    /// record type.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != EVENT_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "EventHeader",
                expected: EVENT_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        // Residue of the generating macro's optional extra-validation hook.
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    /// Splits off this record's payload (declared size minus the header word),
    /// failing on a zero size and reporting the shortfall when `buf` is short.
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        if self.size_words() == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
impl crate::header::TraceHeader for EventHeader {
    /// Stores the record size, delegating to the inherent bitfield setter.
    fn set_size_words(&mut self, n: u16) {
        // `n` is already a `u16`, so the previous `n.try_into().unwrap()`
        // round-trip was an infallible no-op; call the setter directly.
        // (Inherent methods take precedence over this trait method, so the
        // call resolves to `EventHeader::set_size_words`, not recursion.)
        self.set_size_words(n);
    }
    /// Serializes the header word in little-endian byte order.
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Type-specific trailing data of an event record. `Time` is `Ticks` when
/// freshly parsed and `i64` once resolved (see `EventPayload::resolve`).
pub enum EventPayload<Time> {
    Instant,
    Counter { id: u64 },
    DurationBegin,
    DurationEnd,
    DurationComplete { end_timestamp: Time },
    AsyncBegin { id: u64 },
    AsyncInstant { id: u64 },
    AsyncEnd { id: u64 },
    FlowBegin { id: u64 },
    FlowStep { id: u64 },
    FlowEnd { id: u64 },
    /// Unrecognized event type; remaining payload bytes preserved verbatim.
    Unknown { raw_type: u8, bytes: Vec<u8> },
}
#[automatically_derived]
impl<Time: ::core::clone::Clone> ::core::clone::Clone for EventPayload<Time> {
#[inline]
fn clone(&self) -> EventPayload<Time> {
match self {
EventPayload::Instant => EventPayload::Instant,
EventPayload::Counter { id: __self_0 } => {
EventPayload::Counter {
id: ::core::clone::Clone::clone(__self_0),
}
}
EventPayload::DurationBegin => EventPayload::DurationBegin,
EventPayload::DurationEnd => EventPayload::DurationEnd,
EventPayload::DurationComplete { end_timestamp: __self_0 } => {
EventPayload::DurationComplete {
end_timestamp: ::core::clone::Clone::clone(__self_0),
}
}
EventPayload::AsyncBegin { id: __self_0 } => {
EventPayload::AsyncBegin {
id: ::core::clone::Clone::clone(__self_0),
}
}
EventPayload::AsyncInstant { id: __self_0 } => {
EventPayload::AsyncInstant {
id: ::core::clone::Clone::clone(__self_0),
}
}
EventPayload::AsyncEnd { id: __self_0 } => {
EventPayload::AsyncEnd {
id: ::core::clone::Clone::clone(__self_0),
}
}
EventPayload::FlowBegin { id: __self_0 } => {
EventPayload::FlowBegin {
id: ::core::clone::Clone::clone(__self_0),
}
}
EventPayload::FlowStep { id: __self_0 } => {
EventPayload::FlowStep {
id: ::core::clone::Clone::clone(__self_0),
}
}
EventPayload::FlowEnd { id: __self_0 } => {
EventPayload::FlowEnd {
id: ::core::clone::Clone::clone(__self_0),
}
}
EventPayload::Unknown { raw_type: __self_0, bytes: __self_1 } => {
EventPayload::Unknown {
raw_type: ::core::clone::Clone::clone(__self_0),
bytes: ::core::clone::Clone::clone(__self_1),
}
}
}
}
}
impl<Time: ::core::fmt::Debug> ::core::fmt::Debug for EventPayload<Time> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            EventPayload::Instant => f.write_str("Instant"),
            EventPayload::DurationBegin => f.write_str("DurationBegin"),
            EventPayload::DurationEnd => f.write_str("DurationEnd"),
            EventPayload::Counter { id } => {
                f.debug_struct("Counter").field("id", id).finish()
            }
            EventPayload::DurationComplete { end_timestamp } => {
                f.debug_struct("DurationComplete")
                    .field("end_timestamp", end_timestamp)
                    .finish()
            }
            EventPayload::AsyncBegin { id } => {
                f.debug_struct("AsyncBegin").field("id", id).finish()
            }
            EventPayload::AsyncInstant { id } => {
                f.debug_struct("AsyncInstant").field("id", id).finish()
            }
            EventPayload::AsyncEnd { id } => {
                f.debug_struct("AsyncEnd").field("id", id).finish()
            }
            EventPayload::FlowBegin { id } => {
                f.debug_struct("FlowBegin").field("id", id).finish()
            }
            EventPayload::FlowStep { id } => {
                f.debug_struct("FlowStep").field("id", id).finish()
            }
            EventPayload::FlowEnd { id } => {
                f.debug_struct("FlowEnd").field("id", id).finish()
            }
            EventPayload::Unknown { raw_type, bytes } => {
                f.debug_struct("Unknown")
                    .field("raw_type", raw_type)
                    .field("bytes", bytes)
                    .finish()
            }
        }
    }
}
impl<Time> ::core::marker::StructuralPartialEq for EventPayload<Time> {}
impl<Time: ::core::cmp::PartialEq> ::core::cmp::PartialEq for EventPayload<Time> {
    #[inline]
    fn eq(&self, other: &EventPayload<Time>) -> bool {
        // Same variant first (stable replacement for the derive's
        // `intrinsics::discriminant_value` comparison), then equal payloads.
        // Payload-free variants compare equal via the catch-all arm.
        ::core::mem::discriminant(self) == ::core::mem::discriminant(other)
            && match (self, other) {
                (
                    EventPayload::Counter { id: lhs },
                    EventPayload::Counter { id: rhs },
                ) => lhs == rhs,
                (
                    EventPayload::DurationComplete { end_timestamp: lhs },
                    EventPayload::DurationComplete { end_timestamp: rhs },
                ) => lhs == rhs,
                (
                    EventPayload::AsyncBegin { id: lhs },
                    EventPayload::AsyncBegin { id: rhs },
                ) => lhs == rhs,
                (
                    EventPayload::AsyncInstant { id: lhs },
                    EventPayload::AsyncInstant { id: rhs },
                ) => lhs == rhs,
                (
                    EventPayload::AsyncEnd { id: lhs },
                    EventPayload::AsyncEnd { id: rhs },
                ) => lhs == rhs,
                (
                    EventPayload::FlowBegin { id: lhs },
                    EventPayload::FlowBegin { id: rhs },
                ) => lhs == rhs,
                (
                    EventPayload::FlowStep { id: lhs },
                    EventPayload::FlowStep { id: rhs },
                ) => lhs == rhs,
                (
                    EventPayload::FlowEnd { id: lhs },
                    EventPayload::FlowEnd { id: rhs },
                ) => lhs == rhs,
                (
                    EventPayload::Unknown { raw_type: lt, bytes: lb },
                    EventPayload::Unknown { raw_type: rt, bytes: rb },
                ) => lt == rt && lb == rb,
                _ => true,
            }
    }
}
impl EventPayload<Ticks> {
pub(crate) fn resolve(self, ctx: &ResolveCtx) -> EventPayload<i64> {
match self {
EventPayload::Instant => EventPayload::Instant,
EventPayload::Counter { id } => EventPayload::Counter { id },
EventPayload::DurationBegin => EventPayload::DurationBegin,
EventPayload::DurationEnd => EventPayload::DurationEnd,
EventPayload::DurationComplete { end_timestamp } => {
EventPayload::DurationComplete {
end_timestamp: ctx.resolve_ticks(end_timestamp),
}
}
EventPayload::AsyncBegin { id } => EventPayload::AsyncBegin { id },
EventPayload::AsyncInstant { id } => EventPayload::AsyncInstant { id },
EventPayload::AsyncEnd { id } => EventPayload::AsyncEnd { id },
EventPayload::FlowBegin { id } => EventPayload::FlowBegin { id },
EventPayload::FlowStep { id } => EventPayload::FlowStep { id },
EventPayload::FlowEnd { id } => EventPayload::FlowEnd { id },
EventPayload::Unknown { raw_type, bytes } => {
EventPayload::Unknown {
raw_type,
bytes,
}
}
}
}
}
impl EventPayload<Ticks> {
    /// Parses the type-specific trailing payload of an event record.
    ///
    /// Unrecognized event types consume the entire remaining buffer and are
    /// preserved verbatim in `Unknown`.
    fn parse(event_type: u8, buf: &[u8]) -> ParseResult<'_, Self> {
        use nom::combinator::map;
        match event_type {
            INSTANT_EVENT_TYPE => Ok((buf, EventPayload::Instant)),
            COUNTER_EVENT_TYPE => map(le_u64, |id| EventPayload::Counter { id })(buf),
            DURATION_BEGIN_EVENT_TYPE => Ok((buf, EventPayload::DurationBegin)),
            DURATION_END_EVENT_TYPE => Ok((buf, EventPayload::DurationEnd)),
            DURATION_COMPLETE_EVENT_TYPE => {
                map(
                    Ticks::parse,
                    |end_timestamp| EventPayload::DurationComplete {
                        end_timestamp,
                    },
                )(buf)
            }
            ASYNC_BEGIN_EVENT_TYPE => {
                map(le_u64, |id| EventPayload::AsyncBegin { id })(buf)
            }
            ASYNC_INSTANT_EVENT_TYPE => {
                map(le_u64, |id| EventPayload::AsyncInstant { id })(buf)
            }
            ASYNC_END_EVENT_TYPE => {
                map(le_u64, |id| EventPayload::AsyncEnd { id })(buf)
            }
            FLOW_BEGIN_EVENT_TYPE => {
                map(le_u64, |id| EventPayload::FlowBegin { id })(buf)
            }
            FLOW_STEP_EVENT_TYPE => {
                map(le_u64, |id| EventPayload::FlowStep { id })(buf)
            }
            FLOW_END_EVENT_TYPE => {
                map(le_u64, |id| EventPayload::FlowEnd { id })(buf)
            }
            unknown => {
                Ok((
                    &[][..],
                    EventPayload::Unknown {
                        raw_type: unknown,
                        bytes: buf.to_vec(),
                    },
                ))
            }
        }
    }
}
}
mod fxt_builder {
/// Incrementally builds one FXT record: a reserved header word followed by
/// word-aligned payload atoms.
pub(crate) struct FxtBuilder<H> {
    header: H,
    buf: Vec<u8>,
}
impl<H: ::core::clone::Clone> ::core::clone::Clone for FxtBuilder<H> {
    #[inline]
    fn clone(&self) -> FxtBuilder<H> {
        FxtBuilder {
            header: self.header.clone(),
            buf: self.buf.clone(),
        }
    }
}
impl<H: crate::header::TraceHeader> FxtBuilder<H> {
    /// Start a new fxt record with a typed header. The header should be completely configured for
    /// the corresponding record except for its size in words which will be updated by the builder.
    pub fn new(mut header: H) -> Self {
        // The first word is reserved for the header; it is written in `build`.
        let buf = vec![0u8; 8];
        header.set_size_words(1);
        Self { header, buf }
    }
    /// Appends `atom`, zero-padding to the next word boundary, and keeps the
    /// header's size-in-words field in sync with the buffer.
    pub fn atom(mut self, atom: impl AsRef<[u8]>) -> Self {
        self.buf.extend(atom.as_ref());
        for _ in 0..crate::word_padding(self.buf.len()) {
            self.buf.push(0);
        }
        assert_eq!(
            self.buf.len() % 8,
            0,
            "buffer should be word-aligned after adding padding",
        );
        assert!(self.buf.len() < 32_768, "maximum record size is 32kb");
        let size_words: u16 = (self.buf.len() / 8)
            .try_into()
            .expect("trace records size in words must fit in a u16");
        self.header.set_size_words(size_words);
        self
    }
    /// Return the bytes of a possibly-valid fxt record with the header in place.
    pub fn build(mut self) -> Vec<u8> {
        self.buf[..8].copy_from_slice(&self.header.to_le_bytes());
        self.buf
    }
}
impl<H: std::fmt::Debug> std::fmt::Debug for FxtBuilder<H> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Skip the reserved header word; show the payload as word-sized chunks.
        let words: Vec<_> = self.buf.chunks_exact(8).skip(1).collect();
        f.debug_struct("FxtBuilder")
            .field("header", &self.header)
            .field("buf", &words)
            .finish()
    }
}
}
mod header {
/// Common interface implemented by every bit-packed 64-bit record header.
pub(crate) trait TraceHeader {
    /// Stores the total record size, measured in 8-byte words (header included).
    fn set_size_words(&mut self, n: u16);
    /// Serializes the header word in little-endian byte order.
    fn to_le_bytes(&self) -> [u8; 8];
}
}
mod init {
use crate::{trace_header, ParseResult, INIT_RECORD_TYPE};
use nom::{combinator::all_consuming, number::complete::le_u64};
use std::time::Duration;
pub(crate) struct Ticks(pub(crate) u64);
#[automatically_derived]
impl ::core::clone::Clone for Ticks {
#[inline]
fn clone(&self) -> Ticks {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for Ticks {}
#[automatically_derived]
impl ::core::fmt::Debug for Ticks {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
::core::fmt::Formatter::debug_tuple_field1_finish(f, "Ticks", &&self.0)
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for Ticks {}
#[automatically_derived]
impl ::core::cmp::PartialEq for Ticks {
#[inline]
fn eq(&self, other: &Ticks) -> bool {
self.0 == other.0
}
}
impl Ticks {
    /// Reads a little-endian `u64` tick counter.
    pub(crate) fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
        nom::combinator::map(nom::number::complete::le_u64, Ticks)(buf)
    }
    /// Converts a tick count to nanoseconds at `ticks_per_second`, widening to
    /// `u128` so the multiplication cannot overflow before the division.
    ///
    /// NOTE(review): divides by `ticks_per_second` and will panic if it is 0 —
    /// presumably the init record guarantees a nonzero rate; confirm upstream.
    pub(crate) fn scale(self, ticks_per_second: u64) -> i64 {
        const NANOS_PER_SECOND: u128 = Duration::from_secs(1).as_nanos() as _;
        ((self.0 as u128 * NANOS_PER_SECOND) / ticks_per_second as u128)
            .try_into()
            .expect(
                "overflowing a signed monotonic timestamp would take ~292 years of uptime",
            )
    }
}
/// The init record establishes the tick rate used to resolve all timestamps.
pub(super) struct InitRecord {
    pub ticks_per_second: u64,
}
impl ::core::clone::Clone for InitRecord {
    #[inline]
    fn clone(&self) -> InitRecord {
        InitRecord {
            ticks_per_second: self.ticks_per_second.clone(),
        }
    }
}
impl ::core::fmt::Debug for InitRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        f.debug_struct("InitRecord")
            .field("ticks_per_second", &self.ticks_per_second)
            .finish()
    }
}
impl ::core::marker::StructuralPartialEq for InitRecord {}
impl ::core::cmp::PartialEq for InitRecord {
    #[inline]
    fn eq(&self, other: &InitRecord) -> bool {
        self.ticks_per_second == other.ticks_per_second
    }
}
/// Bit-packed 64-bit header word for an init record.
pub struct InitHeader(u64);
impl ::core::clone::Clone for InitHeader {
    #[inline]
    fn clone(&self) -> InitHeader {
        // `InitHeader` is `Copy`; cloning is a bitwise copy.
        *self
    }
}
impl ::core::marker::Copy for InitHeader {}
impl ::core::marker::StructuralEq for InitHeader {}
impl ::core::cmp::Eq for InitHeader {}
impl ::core::marker::StructuralPartialEq for InitHeader {}
impl ::core::cmp::PartialEq for InitHeader {
    #[inline]
    fn eq(&self, other: &InitHeader) -> bool {
        self.0 == other.0
    }
}
// `bitfield` crate plumbing: forward bit-range reads and writes to the inner
// `u64` so the field accessors in `impl InitHeader` can address sub-ranges.
impl<T> ::bitfield::BitRange<T> for InitHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
impl<T> ::bitfield::BitRangeMut<T> for InitHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for InitHeader {
    /// Shows the raw word alongside each decoded bitfield.
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        f.debug_struct("InitHeader")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .finish()
    }
}
impl InitHeader {
    // Bit layout (LSB first): [3:0] record type, [15:4] size in words.
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    /// A header with only the record type set; the size field is filled in by
    /// `FxtBuilder` during serialization.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(INIT_RECORD_TYPE);
        header
    }
    /// Wraps `bits`, rejecting any word whose type field is not the init
    /// record type.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != INIT_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "InitHeader",
                expected: INIT_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        // Residue of the generating macro's optional extra-validation hook.
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    /// Splits off this record's payload (declared size minus the header word),
    /// failing on a zero size and reporting the shortfall when `buf` is short.
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        if self.size_words() == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
impl crate::header::TraceHeader for InitHeader {
    /// Stores the record size, delegating to the inherent bitfield setter.
    fn set_size_words(&mut self, n: u16) {
        // `n` is already a `u16`, so the previous `n.try_into().unwrap()`
        // round-trip was an infallible no-op; call the setter directly.
        // (Inherent methods take precedence over this trait method, so the
        // call resolves to `InitHeader::set_size_words`, not recursion.)
        self.set_size_words(n);
    }
    /// Serializes the header word in little-endian byte order.
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
impl InitRecord {
    /// Parses an init record: a validated header word followed by exactly one
    /// little-endian `u64` tick rate.
    pub(super) fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
        let (buf, header) = InitHeader::parse(buf)?;
        let (rem, payload) = header.take_payload(buf)?;
        let (empty, ticks_per_second) = all_consuming(le_u64)(payload)?;
        assert!(
            empty.is_empty(),
            "all_consuming must not return any remaining buffer",
        );
        Ok((rem, Self { ticks_per_second }))
    }
}
}
mod log {
use crate::{
init::Ticks, session::ResolveCtx, string::parse_padded_string,
thread::{ProcessKoid, ProcessRef, ThreadKoid, ThreadRef},
trace_header, ParseResult, Provider, LOG_RECORD_TYPE,
};
use flyweights::FlyStr;
use nom::combinator::all_consuming;
/// A fully-resolved log record with an owned message string.
pub struct LogRecord {
    pub provider: Option<Provider>,
    pub timestamp: i64,
    pub process: ProcessKoid,
    pub thread: ThreadKoid,
    pub message: FlyStr,
}
impl ::core::clone::Clone for LogRecord {
    #[inline]
    fn clone(&self) -> LogRecord {
        LogRecord {
            provider: self.provider.clone(),
            timestamp: self.timestamp.clone(),
            process: self.process.clone(),
            thread: self.thread.clone(),
            message: self.message.clone(),
        }
    }
}
impl ::core::fmt::Debug for LogRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        f.debug_struct("LogRecord")
            .field("provider", &self.provider)
            .field("timestamp", &self.timestamp)
            .field("process", &self.process)
            .field("thread", &self.thread)
            .field("message", &self.message)
            .finish()
    }
}
impl ::core::marker::StructuralPartialEq for LogRecord {}
impl ::core::cmp::PartialEq for LogRecord {
    #[inline]
    fn eq(&self, other: &LogRecord) -> bool {
        self.provider == other.provider
            && self.timestamp == other.timestamp
            && self.process == other.process
            && self.thread == other.thread
            && self.message == other.message
    }
}
impl LogRecord {
    /// Resolves a borrowed raw log record into owned values using the session
    /// state in `ctx` (current provider, tick rate, process/thread tables).
    pub(super) fn resolve(ctx: &mut ResolveCtx, raw: RawLogRecord<'_>) -> Self {
        Self {
            provider: ctx.current_provider(),
            timestamp: ctx.resolve_ticks(raw.ticks),
            process: ctx.resolve_process(raw.process),
            thread: ctx.resolve_thread(raw.thread),
            message: raw.message.into(),
        }
    }
}
/// A log record as parsed off the wire, borrowing the message from the buffer.
pub(super) struct RawLogRecord<'a> {
    ticks: Ticks,
    process: ProcessRef,
    thread: ThreadRef,
    message: &'a str,
}
impl<'a> ::core::fmt::Debug for RawLogRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        f.debug_struct("RawLogRecord")
            .field("ticks", &self.ticks)
            .field("process", &self.process)
            .field("thread", &self.thread)
            .field("message", &self.message)
            .finish()
    }
}
impl<'a> ::core::marker::StructuralPartialEq for RawLogRecord<'a> {}
impl<'a> ::core::cmp::PartialEq for RawLogRecord<'a> {
    #[inline]
    fn eq(&self, other: &RawLogRecord<'a>) -> bool {
        self.ticks == other.ticks
            && self.process == other.process
            && self.thread == other.thread
            && self.message == other.message
    }
}
impl<'a> RawLogRecord<'a> {
    /// Parses one log record: header word, timestamp, process/thread refs,
    /// then a padded message whose length comes from the header.
    pub(super) fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        let (buf, header) = LogHeader::parse(buf)?;
        let (rem, payload) = header.take_payload(buf)?;
        let (payload, ticks) = Ticks::parse(payload)?;
        let (payload, process) = ProcessRef::parse(header.thread_ref(), payload)?;
        let (payload, thread) = ThreadRef::parse(header.thread_ref(), payload)?;
        let (empty, message) = all_consuming(|p| {
            parse_padded_string(header.message_len() as usize, p)
        })(payload)?;
        assert!(
            empty.is_empty(),
            "all_consuming must not return any remaining buffer",
        );
        Ok((
            rem,
            Self {
                ticks,
                process,
                thread,
                message,
            },
        ))
    }
}
/// Bit-packed 64-bit header word for a log record.
pub struct LogHeader(u64);
impl ::core::clone::Clone for LogHeader {
    #[inline]
    fn clone(&self) -> LogHeader {
        // `LogHeader` is `Copy`; cloning is a bitwise copy.
        *self
    }
}
impl ::core::marker::Copy for LogHeader {}
impl ::core::marker::StructuralEq for LogHeader {}
impl ::core::cmp::Eq for LogHeader {}
impl ::core::marker::StructuralPartialEq for LogHeader {}
impl ::core::cmp::PartialEq for LogHeader {
    #[inline]
    fn eq(&self, other: &LogHeader) -> bool {
        self.0 == other.0
    }
}
impl<T> ::bitfield::BitRange<T> for LogHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for LogHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for LogHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("LogHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("message_len", &self.message_len());
debug_struct.field("thread_ref", &self.thread_ref());
debug_struct.finish()
}
}
impl LogHeader {
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn message_len(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(30, 16);
::bitfield::Into::into(raw_value)
}
pub fn thread_ref(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(39, 32);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_message_len(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(30, 16, ::bitfield::Into::<u16>::into(value));
}
pub fn set_thread_ref(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(39, 32, ::bitfield::Into::<u8>::into(value));
}
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(LOG_RECORD_TYPE);
header
}
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != LOG_RECORD_TYPE {
return Err(crate::ParseError::WrongType {
context: "LogHeader",
expected: LOG_RECORD_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for LogHeader {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
}
mod metadata {
use crate::{
string::parse_padded_string, trace_header, ParseError, ParseResult,
METADATA_RECORD_TYPE,
};
use flyweights::FlyStr;
use nom::combinator::all_consuming;
// Metadata record subtypes, carried in bits [19:16] of the metadata header word.
const PROVIDER_INFO_METADATA_TYPE: u8 = 1;
const PROVIDER_SECTION_METADATA_TYPE: u8 = 2;
const PROVIDER_EVENT_METADATA_TYPE: u8 = 3;
const TRACE_INFO_METADATA_TYPE: u8 = 4;
/// A trace provider: numeric id plus interned name string.
pub struct Provider {
    pub id: u32,
    pub name: FlyStr,
}
#[automatically_derived]
impl ::core::clone::Clone for Provider {
    #[inline]
    fn clone(&self) -> Provider {
        Provider {
            id: self.id.clone(),
            name: self.name.clone(),
        }
    }
}
#[automatically_derived]
impl ::core::fmt::Debug for Provider {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        // Equivalent to the expanded `debug_struct_field2_finish` helper.
        f.debug_struct("Provider")
            .field("id", &self.id)
            .field("name", &self.name)
            .finish()
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for Provider {}
#[automatically_derived]
impl ::core::cmp::PartialEq for Provider {
    #[inline]
    fn eq(&self, other: &Provider) -> bool {
        self.id == other.id && self.name == other.name
    }
}
/// A parsed metadata record, discriminated by the header's metadata_type field.
pub(super) enum MetadataRecord {
    ProviderInfo(ProviderInfoMetadataRecord),
    ProviderSection(ProviderSectionMetadataRecord),
    ProviderEvent(ProviderEventMetadataRecord),
    TraceInfo(TraceInfoMetadataRecord),
    Unknown { raw_type: u8 },
}
#[automatically_derived]
impl ::core::fmt::Debug for MetadataRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            MetadataRecord::ProviderInfo(inner) => {
                f.debug_tuple("ProviderInfo").field(inner).finish()
            }
            MetadataRecord::ProviderSection(inner) => {
                f.debug_tuple("ProviderSection").field(inner).finish()
            }
            MetadataRecord::ProviderEvent(inner) => {
                f.debug_tuple("ProviderEvent").field(inner).finish()
            }
            MetadataRecord::TraceInfo(inner) => {
                f.debug_tuple("TraceInfo").field(inner).finish()
            }
            MetadataRecord::Unknown { raw_type } => {
                f.debug_struct("Unknown").field("raw_type", raw_type).finish()
            }
        }
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for MetadataRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for MetadataRecord {
    #[inline]
    fn eq(&self, other: &MetadataRecord) -> bool {
        // Same variant with equal payloads; any variant mismatch is unequal.
        // (Equivalent to the expanded discriminant check + payload match.)
        match (self, other) {
            (MetadataRecord::ProviderInfo(a), MetadataRecord::ProviderInfo(b)) => a == b,
            (MetadataRecord::ProviderSection(a), MetadataRecord::ProviderSection(b)) => a == b,
            (MetadataRecord::ProviderEvent(a), MetadataRecord::ProviderEvent(b)) => a == b,
            (MetadataRecord::TraceInfo(a), MetadataRecord::TraceInfo(b)) => a == b,
            (
                MetadataRecord::Unknown { raw_type: a },
                MetadataRecord::Unknown { raw_type: b },
            ) => a == b,
            _ => false,
        }
    }
}
impl MetadataRecord {
    /// Peeks the common metadata header, then re-parses `buf` with the
    /// subtype-specific parser.
    /// NOTE(review): the `unknown` arm returns `buf` unconsumed — presumably
    /// the caller skips unknown records via the size field; confirm upstream.
    pub(super) fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
        use nom::combinator::map;
        match BaseMetadataHeader::parse(buf)?.1.metadata_type() {
            PROVIDER_INFO_METADATA_TYPE => {
                map(ProviderInfoMetadataRecord::parse, Self::ProviderInfo)(buf)
            }
            PROVIDER_SECTION_METADATA_TYPE => {
                map(ProviderSectionMetadataRecord::parse, Self::ProviderSection)(buf)
            }
            PROVIDER_EVENT_METADATA_TYPE => {
                map(ProviderEventMetadataRecord::parse, Self::ProviderEvent)(buf)
            }
            TRACE_INFO_METADATA_TYPE => {
                map(TraceInfoMetadataRecord::parse, Self::TraceInfo)(buf)
            }
            unknown => Ok((buf, Self::Unknown { raw_type: unknown })),
        }
    }
}
pub struct BaseMetadataHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for BaseMetadataHeader {
#[inline]
fn clone(&self) -> BaseMetadataHeader {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for BaseMetadataHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for BaseMetadataHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for BaseMetadataHeader {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for BaseMetadataHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for BaseMetadataHeader {
#[inline]
fn eq(&self, other: &BaseMetadataHeader) -> bool {
self.0 == other.0
}
}
impl<T> ::bitfield::BitRange<T> for BaseMetadataHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for BaseMetadataHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for BaseMetadataHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("BaseMetadataHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("metadata_type", &self.metadata_type());
debug_struct.finish()
}
}
impl BaseMetadataHeader {
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn metadata_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(19, 16);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_metadata_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(19, 16, ::bitfield::Into::<u8>::into(value));
}
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(METADATA_RECORD_TYPE);
header
}
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != METADATA_RECORD_TYPE {
return Err(crate::ParseError::WrongType {
context: "BaseMetadataHeader",
expected: METADATA_RECORD_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h: &BaseMetadataHeader| {
Ok(())
})(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for BaseMetadataHeader {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
/// Payload of a "provider info" metadata record: registers a provider id/name pair.
pub(super) struct ProviderInfoMetadataRecord {
    pub provider_id: u32,
    pub name: FlyStr,
}
#[automatically_derived]
impl ::core::fmt::Debug for ProviderInfoMetadataRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        f.debug_struct("ProviderInfoMetadataRecord")
            .field("provider_id", &self.provider_id)
            .field("name", &self.name)
            .finish()
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProviderInfoMetadataRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProviderInfoMetadataRecord {
    #[inline]
    fn eq(&self, other: &ProviderInfoMetadataRecord) -> bool {
        self.provider_id == other.provider_id && self.name == other.name
    }
}
pub struct ProviderInfoMetadataHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for ProviderInfoMetadataHeader {
#[inline]
fn clone(&self) -> ProviderInfoMetadataHeader {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for ProviderInfoMetadataHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for ProviderInfoMetadataHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for ProviderInfoMetadataHeader {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProviderInfoMetadataHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProviderInfoMetadataHeader {
#[inline]
fn eq(&self, other: &ProviderInfoMetadataHeader) -> bool {
self.0 == other.0
}
}
impl<T> ::bitfield::BitRange<T> for ProviderInfoMetadataHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for ProviderInfoMetadataHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for ProviderInfoMetadataHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("ProviderInfoMetadataHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("provider_id", &self.provider_id());
debug_struct.field("name_len", &self.name_len());
debug_struct.field("metadata_type", &self.metadata_type());
debug_struct.finish()
}
}
impl ProviderInfoMetadataHeader {
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn provider_id(&self) -> u32 {
use ::bitfield::BitRange;
let raw_value: u32 = self.bit_range(51, 20);
::bitfield::Into::into(raw_value)
}
pub fn name_len(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(59, 52);
::bitfield::Into::into(raw_value)
}
pub fn metadata_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(19, 16);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_provider_id(&mut self, value: u32) {
use ::bitfield::BitRangeMut;
self.set_bit_range(51, 20, ::bitfield::Into::<u32>::into(value));
}
pub fn set_name_len(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(59, 52, ::bitfield::Into::<u8>::into(value));
}
pub fn set_metadata_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(19, 16, ::bitfield::Into::<u8>::into(value));
}
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(METADATA_RECORD_TYPE);
header
}
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != METADATA_RECORD_TYPE {
return Err(crate::ParseError::WrongType {
context: "ProviderInfoMetadataHeader",
expected: METADATA_RECORD_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h: &ProviderInfoMetadataHeader| {
if _h.metadata_type() != PROVIDER_INFO_METADATA_TYPE {
return Err(ParseError::WrongType {
context: "ProviderInfoMetadataHeader",
expected: PROVIDER_INFO_METADATA_TYPE,
observed: _h.metadata_type(),
});
}
Ok(())
})(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for ProviderInfoMetadataHeader {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
impl ProviderInfoMetadataRecord {
    /// Parses the provider-info header, then the padded provider name, which
    /// must exactly exhaust the payload.
    fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
        let (buf, header) = ProviderInfoMetadataHeader::parse(buf)?;
        let (rem, payload) = header.take_payload(buf)?;
        let (empty, name) =
            all_consuming(|p| parse_padded_string(header.name_len() as usize, p))(payload)?;
        // `all_consuming` guarantees this; the assert documents the invariant.
        assert!(empty.is_empty(), "all_consuming must not return any remaining buffer");
        Ok((
            rem,
            Self {
                provider_id: header.provider_id(),
                name: name.into(),
            },
        ))
    }
}
/// Payload of a "provider section" metadata record: marks a switch to the
/// given provider's records.
pub(super) struct ProviderSectionMetadataRecord {
    pub provider_id: u32,
}
#[automatically_derived]
impl ::core::fmt::Debug for ProviderSectionMetadataRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        f.debug_struct("ProviderSectionMetadataRecord")
            .field("provider_id", &self.provider_id)
            .finish()
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProviderSectionMetadataRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProviderSectionMetadataRecord {
    #[inline]
    fn eq(&self, other: &ProviderSectionMetadataRecord) -> bool {
        self.provider_id == other.provider_id
    }
}
pub struct ProviderSectionMetadataHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for ProviderSectionMetadataHeader {
#[inline]
fn clone(&self) -> ProviderSectionMetadataHeader {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for ProviderSectionMetadataHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for ProviderSectionMetadataHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for ProviderSectionMetadataHeader {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProviderSectionMetadataHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProviderSectionMetadataHeader {
#[inline]
fn eq(&self, other: &ProviderSectionMetadataHeader) -> bool {
self.0 == other.0
}
}
impl<T> ::bitfield::BitRange<T> for ProviderSectionMetadataHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for ProviderSectionMetadataHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for ProviderSectionMetadataHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("ProviderSectionMetadataHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("provider_id", &self.provider_id());
debug_struct.field("metadata_type", &self.metadata_type());
debug_struct.finish()
}
}
impl ProviderSectionMetadataHeader {
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn provider_id(&self) -> u32 {
use ::bitfield::BitRange;
let raw_value: u32 = self.bit_range(51, 20);
::bitfield::Into::into(raw_value)
}
pub fn metadata_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(19, 16);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_provider_id(&mut self, value: u32) {
use ::bitfield::BitRangeMut;
self.set_bit_range(51, 20, ::bitfield::Into::<u32>::into(value));
}
pub fn set_metadata_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(19, 16, ::bitfield::Into::<u8>::into(value));
}
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(METADATA_RECORD_TYPE);
header
}
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != METADATA_RECORD_TYPE {
return Err(crate::ParseError::WrongType {
context: "ProviderSectionMetadataHeader",
expected: METADATA_RECORD_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|
_h: &ProviderSectionMetadataHeader|
{
if _h.metadata_type() != PROVIDER_SECTION_METADATA_TYPE {
return Err(ParseError::WrongType {
context: "ProviderSectionMetadataHeader",
expected: PROVIDER_SECTION_METADATA_TYPE,
observed: _h.metadata_type(),
});
}
Ok(())
})(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for ProviderSectionMetadataHeader {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
impl ProviderSectionMetadataRecord {
    /// Header-only record: everything lives in the header word, so any
    /// non-empty payload is a size error.
    fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
        let (buf, header) = ProviderSectionMetadataHeader::parse(buf)?;
        let (rem, payload) = header.take_payload(buf)?;
        if !payload.is_empty() {
            return Err(nom::Err::Failure(ParseError::InvalidSize));
        }
        Ok((rem, Self { provider_id: header.provider_id() }))
    }
}
// Provider event id (header bits [55:52]) signalling the provider's buffer filled.
const PROVIDER_EVENT_BUFFER_FULL: u8 = 0;
/// Payload of a "provider event" metadata record: an event reported by a provider.
pub(super) struct ProviderEventMetadataRecord {
    pub provider_id: u32,
    pub event: ProviderEvent,
}
#[automatically_derived]
impl ::core::fmt::Debug for ProviderEventMetadataRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        f.debug_struct("ProviderEventMetadataRecord")
            .field("provider_id", &self.provider_id)
            .field("event", &self.event)
            .finish()
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProviderEventMetadataRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProviderEventMetadataRecord {
    #[inline]
    fn eq(&self, other: &ProviderEventMetadataRecord) -> bool {
        self.provider_id == other.provider_id && self.event == other.event
    }
}
pub struct ProviderEventMetadataHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for ProviderEventMetadataHeader {
#[inline]
fn clone(&self) -> ProviderEventMetadataHeader {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for ProviderEventMetadataHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for ProviderEventMetadataHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for ProviderEventMetadataHeader {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProviderEventMetadataHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProviderEventMetadataHeader {
#[inline]
fn eq(&self, other: &ProviderEventMetadataHeader) -> bool {
self.0 == other.0
}
}
impl<T> ::bitfield::BitRange<T> for ProviderEventMetadataHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for ProviderEventMetadataHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for ProviderEventMetadataHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("ProviderEventMetadataHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("provider_id", &self.provider_id());
debug_struct.field("event_id", &self.event_id());
debug_struct.field("metadata_type", &self.metadata_type());
debug_struct.finish()
}
}
impl ProviderEventMetadataHeader {
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn provider_id(&self) -> u32 {
use ::bitfield::BitRange;
let raw_value: u32 = self.bit_range(51, 20);
::bitfield::Into::into(raw_value)
}
pub fn event_id(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(55, 52);
::bitfield::Into::into(raw_value)
}
pub fn metadata_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(19, 16);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_provider_id(&mut self, value: u32) {
use ::bitfield::BitRangeMut;
self.set_bit_range(51, 20, ::bitfield::Into::<u32>::into(value));
}
pub fn set_event_id(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(55, 52, ::bitfield::Into::<u8>::into(value));
}
pub fn set_metadata_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(19, 16, ::bitfield::Into::<u8>::into(value));
}
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(METADATA_RECORD_TYPE);
header
}
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != METADATA_RECORD_TYPE {
return Err(crate::ParseError::WrongType {
context: "ProviderEventMetadataHeader",
expected: METADATA_RECORD_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h: &ProviderEventMetadataHeader| {
if _h.metadata_type() != PROVIDER_EVENT_METADATA_TYPE {
return Err(ParseError::WrongType {
context: "ProviderEventMetadataHeader",
expected: PROVIDER_EVENT_METADATA_TYPE,
observed: _h.metadata_type(),
});
}
Ok(())
})(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for ProviderEventMetadataHeader {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
impl ProviderEventMetadataRecord {
    /// Header-only record: the event is decoded from the header word and the
    /// payload must be empty.
    fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
        let (buf, header) = ProviderEventMetadataHeader::parse(buf)?;
        let provider_id = header.provider_id();
        let event = match header.event_id() {
            PROVIDER_EVENT_BUFFER_FULL => ProviderEvent::BufferFull,
            unknown => ProviderEvent::Unknown { raw_type: unknown },
        };
        let (rem, payload) = header.take_payload(buf)?;
        if !payload.is_empty() {
            return Err(nom::Err::Failure(ParseError::InvalidSize));
        }
        Ok((rem, Self { provider_id, event }))
    }
}
pub enum ProviderEvent {
BufferFull,
Unknown { raw_type: u8 },
}
#[automatically_derived]
impl ::core::clone::Clone for ProviderEvent {
#[inline]
fn clone(&self) -> ProviderEvent {
match self {
ProviderEvent::BufferFull => ProviderEvent::BufferFull,
ProviderEvent::Unknown { raw_type: __self_0 } => {
ProviderEvent::Unknown {
raw_type: ::core::clone::Clone::clone(__self_0),
}
}
}
}
}
#[automatically_derived]
impl ::core::fmt::Debug for ProviderEvent {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
ProviderEvent::BufferFull => {
::core::fmt::Formatter::write_str(f, "BufferFull")
}
ProviderEvent::Unknown { raw_type: __self_0 } => {
::core::fmt::Formatter::debug_struct_field1_finish(
f,
"Unknown",
"raw_type",
&__self_0,
)
}
}
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProviderEvent {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProviderEvent {
#[inline]
fn eq(&self, other: &ProviderEvent) -> bool {
let __self_tag = ::core::intrinsics::discriminant_value(self);
let __arg1_tag = ::core::intrinsics::discriminant_value(other);
__self_tag == __arg1_tag
&& match (self, other) {
(
ProviderEvent::Unknown { raw_type: __self_0 },
ProviderEvent::Unknown { raw_type: __arg1_0 },
) => *__self_0 == *__arg1_0,
_ => true,
}
}
}
// Trace-info subtype (header bits [23:20]) and the expected trace magic word.
const MAGIC_NUMBER_TRACE_INFO_TYPE: u8 = 0;
const MAGIC_NUMBER: u32 = 0x16547846;
/// A trace-info metadata record; only the magic-number subtype is modeled.
pub(super) enum TraceInfoMetadataRecord {
    MagicNumber,
}
#[automatically_derived]
impl ::core::fmt::Debug for TraceInfoMetadataRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        // Single-variant enum: always renders as "MagicNumber".
        f.write_str("MagicNumber")
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for TraceInfoMetadataRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for TraceInfoMetadataRecord {
    #[inline]
    fn eq(&self, other: &TraceInfoMetadataRecord) -> bool {
        // Only one variant exists, so any two values compare equal.
        true
    }
}
impl TraceInfoMetadataRecord {
fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
let (buf, header) = MagicNumberHeader::parse(buf)?;
if header.trace_info_type() != MAGIC_NUMBER_TRACE_INFO_TYPE {
return Err(
nom::Err::Error(ParseError::WrongType {
context: "MagicNumber",
expected: MAGIC_NUMBER_TRACE_INFO_TYPE,
observed: header.trace_info_type(),
}),
);
}
if header.magic_number() != MAGIC_NUMBER {
return Err(
nom::Err::Failure(ParseError::InvalidMagicNumber {
observed: header.magic_number(),
}),
);
}
let (rem, payload) = header.take_payload(buf)?;
if !payload.is_empty() {
return Err(nom::Err::Failure(ParseError::InvalidSize));
}
Ok((rem, Self::MagicNumber))
}
}
/// Bit-packed header word for the trace-info magic-number record.
pub struct MagicNumberHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for MagicNumberHeader {
    #[inline]
    fn clone(&self) -> MagicNumberHeader {
        // u64 is Copy, so a bitwise copy is a valid clone.
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for MagicNumberHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for MagicNumberHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for MagicNumberHeader {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for MagicNumberHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for MagicNumberHeader {
    #[inline]
    fn eq(&self, other: &MagicNumberHeader) -> bool {
        self.0 == other.0
    }
}
// Delegate bit-range reads to the wrapped u64.
impl<T> ::bitfield::BitRange<T> for MagicNumberHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
// Delegate bit-range writes to the wrapped u64.
impl<T> ::bitfield::BitRangeMut<T> for MagicNumberHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for MagicNumberHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Show the raw word plus every decoded field.
        f.debug_struct("MagicNumberHeader")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .field("trace_info_type", &self.trace_info_type())
            .field("magic_number", &self.magic_number())
            .field("metadata_type", &self.metadata_type())
            .finish()
    }
}
impl MagicNumberHeader {
    // Bit layout of the header word (msb:lsb, inclusive):
    //   raw_type        3:0   — record type (must be METADATA_RECORD_TYPE)
    //   size_words     15:4   — total record size in 8-byte words, incl. header
    //   metadata_type  19:16  — metadata subtype (must be TRACE_INFO_METADATA_TYPE)
    //   trace_info_type 23:20 — trace-info subtype
    //   magic_number   55:24  — the FXT magic word
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    pub fn trace_info_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(23, 20);
        ::bitfield::Into::into(raw_value)
    }
    pub fn magic_number(&self) -> u32 {
        use ::bitfield::BitRange;
        let raw_value: u32 = self.bit_range(55, 24);
        ::bitfield::Into::into(raw_value)
    }
    pub fn metadata_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(19, 16);
        ::bitfield::Into::into(raw_value)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_trace_info_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(23, 20, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_magic_number(&mut self, value: u32) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(55, 24, ::bitfield::Into::<u32>::into(value));
    }
    pub fn set_metadata_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(19, 16, ::bitfield::Into::<u8>::into(value));
    }
    // Fresh header with only the record type set; used when writing records.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(METADATA_RECORD_TYPE);
        header
    }
    // Validates the raw bits: the record type and the metadata subtype must
    // both match before the header is accepted.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != METADATA_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "MagicNumberHeader",
                expected: METADATA_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        // Extra validation emitted by the trace_header macro as a closure.
        let res: Result<(), crate::ParseError> = (|_h: &MagicNumberHeader| {
            if _h.metadata_type() != TRACE_INFO_METADATA_TYPE {
                return Err(ParseError::WrongType {
                    context: "MagicNumberHeader",
                    expected: TRACE_INFO_METADATA_TYPE,
                    observed: _h.metadata_type(),
                });
            }
            Ok(())
        })(&header);
        res?;
        Ok(header)
    }
    // Reads one little-endian u64 and validates it via `new`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    // Splits the payload off `buf`. `size_words` counts 8-byte words including
    // the header word already consumed, so a value of 0 is invalid and the
    // payload is (size_words - 1) * 8 bytes. Returns Incomplete if `buf` is
    // too short.
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        if self.size_words() == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
// Generic header interface: forwards to the inherent bitfield accessors.
impl crate::header::TraceHeader for MagicNumberHeader {
    fn set_size_words(&mut self, n: u16) {
        // `n` is already a u16; the previous `n.try_into().unwrap()` was an
        // infallible identity conversion (clippy::useless_conversion).
        // Inherent methods take precedence, so this calls the bitfield setter.
        self.set_size_words(n);
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        // Headers are serialized as one little-endian u64 word.
        self.0.to_le_bytes()
    }
}
}
mod objects {
use crate::{
args::{Arg, RawArg},
session::ResolveCtx, string::StringRef, thread::{ProcessKoid, ProcessRef},
trace_header, ParseResult, Provider, KERNEL_OBJ_RECORD_TYPE,
USERSPACE_OBJ_RECORD_TYPE,
};
use flyweights::FlyStr;
use nom::{combinator::all_consuming, number::complete::le_u64};
// A resolved userspace-object record: a named pointer within a process,
// attributed to the provider that emitted it, with any attached args.
pub struct UserspaceObjRecord {
    pub provider: Option<Provider>,
    pub pointer: u64,
    pub process: ProcessKoid,
    pub name: FlyStr,
    pub args: Vec<Arg>,
}
#[automatically_derived]
impl ::core::clone::Clone for UserspaceObjRecord {
    #[inline]
    fn clone(&self) -> UserspaceObjRecord {
        UserspaceObjRecord {
            provider: ::core::clone::Clone::clone(&self.provider),
            pointer: ::core::clone::Clone::clone(&self.pointer),
            process: ::core::clone::Clone::clone(&self.process),
            name: ::core::clone::Clone::clone(&self.name),
            args: ::core::clone::Clone::clone(&self.args),
        }
    }
}
#[automatically_derived]
impl ::core::fmt::Debug for UserspaceObjRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field5_finish(
            f,
            "UserspaceObjRecord",
            "provider",
            &self.provider,
            "pointer",
            &self.pointer,
            "process",
            &self.process,
            "name",
            &self.name,
            "args",
            &&self.args,
        )
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for UserspaceObjRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for UserspaceObjRecord {
    #[inline]
    fn eq(&self, other: &UserspaceObjRecord) -> bool {
        // Field-wise comparison, short-circuiting left to right.
        self.provider == other.provider && self.pointer == other.pointer
            && self.process == other.process && self.name == other.name
            && self.args == other.args
    }
}
impl UserspaceObjRecord {
    // Resolves a raw record against the session context: tags the record with
    // the current provider, resolves the process and name references, and
    // resolves the raw args (dropping those with unknown types).
    pub(super) fn resolve(
        ctx: &mut ResolveCtx,
        raw: RawUserspaceObjRecord<'_>,
    ) -> Self {
        Self {
            provider: ctx.current_provider(),
            pointer: raw.pointer,
            process: ctx.resolve_process(raw.process),
            name: ctx.resolve_str(raw.name),
            args: Arg::resolve_n(ctx, raw.args),
        }
    }
}
// Unresolved userspace-object record: process and name are still references
// into the trace stream, borrowing from the input buffer.
pub(super) struct RawUserspaceObjRecord<'a> {
    pointer: u64,
    process: ProcessRef,
    name: StringRef<'a>,
    args: Vec<RawArg<'a>>,
}
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawUserspaceObjRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(
            f,
            "RawUserspaceObjRecord",
            "pointer",
            &self.pointer,
            "process",
            &self.process,
            "name",
            &self.name,
            "args",
            &&self.args,
        )
    }
}
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawUserspaceObjRecord<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawUserspaceObjRecord<'a> {
    #[inline]
    fn eq(&self, other: &RawUserspaceObjRecord<'a>) -> bool {
        self.pointer == other.pointer && self.process == other.process
            && self.name == other.name && self.args == other.args
    }
}
impl<'a> RawUserspaceObjRecord<'a> {
    // Parses one userspace-object record: header word, then within the
    // header-sized payload a u64 pointer, a process ref, a name ref, and
    // exactly `num_args` args consuming the rest of the payload.
    pub(super) fn parse(buf: &'a [u8]) -> ParseResult<'_, Self> {
        let (buf, header) = UserspaceObjHeader::parse(buf)?;
        let (rem, payload) = header.take_payload(buf)?;
        let (payload, pointer) = le_u64(payload)?;
        let (payload, process) = ProcessRef::parse(header.process_ref(), payload)?;
        let (payload, name) = StringRef::parse(header.name_ref(), payload)?;
        let (empty, args) = all_consuming(|p| RawArg::parse_n(
            header.num_args(),
            p,
        ))(payload)?;
        // Expanded `assert!`: all_consuming already guarantees an empty
        // remainder, so a non-empty `empty` here is a bug, not bad input.
        if !empty.is_empty() {
            {
                ::core::panicking::panic_fmt(
                    format_args!(
                        "all_consuming must not return any remaining buffer",
                    ),
                );
            }
        }
        Ok((
            rem,
            Self {
                pointer,
                process,
                name,
                args,
            },
        ))
    }
}
// 64-bit header word of a userspace-object record; fields are packed
// bitfields of the inner u64 (see the accessor impl for the bit layout).
pub struct UserspaceObjHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for UserspaceObjHeader {
    #[inline]
    fn clone(&self) -> UserspaceObjHeader {
        // Copy type: clone is a bitwise copy of the u64.
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for UserspaceObjHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for UserspaceObjHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for UserspaceObjHeader {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for UserspaceObjHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for UserspaceObjHeader {
    #[inline]
    fn eq(&self, other: &UserspaceObjHeader) -> bool {
        self.0 == other.0
    }
}
// Bitfield plumbing: delegate bit-range reads to the inner u64.
impl<T> ::bitfield::BitRange<T> for UserspaceObjHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
// Bitfield plumbing: delegate bit-range writes to the inner u64.
impl<T> ::bitfield::BitRangeMut<T> for UserspaceObjHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for UserspaceObjHeader {
    // Debug shows the raw word plus each decoded bitfield for diagnostics.
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        let mut debug_struct = f.debug_struct("UserspaceObjHeader");
        debug_struct.field(".0", &self.0);
        debug_struct.field("raw_type", &self.raw_type());
        debug_struct.field("size_words", &self.size_words());
        debug_struct.field("process_ref", &self.process_ref());
        debug_struct.field("name_ref", &self.name_ref());
        debug_struct.field("num_args", &self.num_args());
        debug_struct.finish()
    }
}
impl UserspaceObjHeader {
    // Bit layout of the header word (msb:lsb, inclusive):
    //   raw_type     3:0   — record type (must be USERSPACE_OBJ_RECORD_TYPE)
    //   size_words  15:4   — total record size in 8-byte words, incl. header
    //   process_ref 23:16  — process reference encoding
    //   name_ref    39:24  — name string reference encoding
    //   num_args    43:40  — number of args in the payload
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    pub fn process_ref(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(23, 16);
        ::bitfield::Into::into(raw_value)
    }
    pub fn name_ref(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(39, 24);
        ::bitfield::Into::into(raw_value)
    }
    pub fn num_args(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(43, 40);
        ::bitfield::Into::into(raw_value)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_process_ref(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(23, 16, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(39, 24, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_num_args(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(43, 40, ::bitfield::Into::<u8>::into(value));
    }
    // Fresh header with only the record type set; used when writing records.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(USERSPACE_OBJ_RECORD_TYPE);
        header
    }
    // Validates the raw bits: only the record type is checked here; the
    // macro-generated extra-validation closure is a no-op for this header.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != USERSPACE_OBJ_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "UserspaceObjHeader",
                expected: USERSPACE_OBJ_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    // Reads one little-endian u64 and validates it via `new`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    // Splits the payload off `buf`. `size_words` counts 8-byte words including
    // the header word already consumed, so a value of 0 is invalid and the
    // payload is (size_words - 1) * 8 bytes. Returns Incomplete if `buf` is
    // too short.
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        if self.size_words() == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
// Generic header interface: forwards to the inherent bitfield accessors.
impl crate::header::TraceHeader for UserspaceObjHeader {
    fn set_size_words(&mut self, n: u16) {
        // `n` is already a u16; the previous `n.try_into().unwrap()` was an
        // infallible identity conversion (clippy::useless_conversion).
        // Inherent methods take precedence, so this calls the bitfield setter.
        self.set_size_words(n);
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        // Headers are serialized as one little-endian u64 word.
        self.0.to_le_bytes()
    }
}
// A resolved kernel-object record: a koid with its object type and name,
// attributed to the provider that emitted it, with any attached args.
pub struct KernelObjRecord {
    pub provider: Option<Provider>,
    pub koid: u64,
    pub ty: KernelObjType,
    pub name: FlyStr,
    pub args: Vec<Arg>,
}
#[automatically_derived]
impl ::core::clone::Clone for KernelObjRecord {
    #[inline]
    fn clone(&self) -> KernelObjRecord {
        KernelObjRecord {
            provider: ::core::clone::Clone::clone(&self.provider),
            koid: ::core::clone::Clone::clone(&self.koid),
            ty: ::core::clone::Clone::clone(&self.ty),
            name: ::core::clone::Clone::clone(&self.name),
            args: ::core::clone::Clone::clone(&self.args),
        }
    }
}
#[automatically_derived]
impl ::core::fmt::Debug for KernelObjRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field5_finish(
            f,
            "KernelObjRecord",
            "provider",
            &self.provider,
            "koid",
            &self.koid,
            "ty",
            &self.ty,
            "name",
            &self.name,
            "args",
            &&self.args,
        )
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for KernelObjRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for KernelObjRecord {
    #[inline]
    fn eq(&self, other: &KernelObjRecord) -> bool {
        // Field-wise comparison, short-circuiting left to right.
        self.provider == other.provider && self.koid == other.koid
            && self.ty == other.ty && self.name == other.name
            && self.args == other.args
    }
}
impl KernelObjRecord {
/// By convention kobj records have a "process" arg which lists the process koid.
pub fn process(&self) -> Option<ProcessKoid> {
for arg in &self.args {
if arg.name == "process" {
if let Some(k) = arg.value.kernel_obj() {
return Some(ProcessKoid(k));
}
}
}
None
}
pub(super) fn resolve(
ctx: &mut ResolveCtx,
raw: RawKernelObjRecord<'_>,
) -> Self {
Self {
provider: ctx.current_provider(),
koid: raw.koid,
ty: raw.ty,
name: ctx.resolve_str(raw.name),
args: Arg::resolve_n(ctx, raw.args),
}
}
}
// Unresolved kernel-object record: the name is still a string reference
// borrowing from the input buffer.
pub(crate) struct RawKernelObjRecord<'a> {
    koid: u64,
    ty: KernelObjType,
    name: StringRef<'a>,
    args: Vec<RawArg<'a>>,
}
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawKernelObjRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field4_finish(
            f,
            "RawKernelObjRecord",
            "koid",
            &self.koid,
            "ty",
            &self.ty,
            "name",
            &self.name,
            "args",
            &&self.args,
        )
    }
}
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawKernelObjRecord<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawKernelObjRecord<'a> {
    #[inline]
    fn eq(&self, other: &RawKernelObjRecord<'a>) -> bool {
        self.koid == other.koid && self.ty == other.ty && self.name == other.name
            && self.args == other.args
    }
}
impl<'a> RawKernelObjRecord<'a> {
    // Parses one kernel-object record: header word (which carries the object
    // type), then within the header-sized payload a u64 koid, a name ref, and
    // exactly `num_args` args consuming the rest of the payload.
    pub(super) fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        let (buf, header) = KernelObjHeader::parse(buf)?;
        let ty = KernelObjType::from(header.kernel_obj_type());
        let (rem, payload) = header.take_payload(buf)?;
        let (payload, koid) = le_u64(payload)?;
        let (payload, name) = StringRef::parse(header.name_ref(), payload)?;
        let (empty, args) = all_consuming(|p| RawArg::parse_n(
            header.num_args(),
            p,
        ))(payload)?;
        // Expanded `assert!`: all_consuming already guarantees an empty
        // remainder, so a non-empty `empty` here is a bug, not bad input.
        if !empty.is_empty() {
            {
                ::core::panicking::panic_fmt(
                    format_args!(
                        "all_consuming must not return any remaining buffer",
                    ),
                );
            }
        }
        Ok((rem, Self { koid, name, args, ty }))
    }
}
// 64-bit header word of a kernel-object record; fields are packed bitfields
// of the inner u64 (see the accessor impl for the bit layout).
pub struct KernelObjHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for KernelObjHeader {
    #[inline]
    fn clone(&self) -> KernelObjHeader {
        // Copy type: clone is a bitwise copy of the u64.
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for KernelObjHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for KernelObjHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for KernelObjHeader {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for KernelObjHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for KernelObjHeader {
    #[inline]
    fn eq(&self, other: &KernelObjHeader) -> bool {
        self.0 == other.0
    }
}
// Bitfield plumbing: delegate bit-range reads to the inner u64.
impl<T> ::bitfield::BitRange<T> for KernelObjHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
// Bitfield plumbing: delegate bit-range writes to the inner u64.
impl<T> ::bitfield::BitRangeMut<T> for KernelObjHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for KernelObjHeader {
    // Debug shows the raw word plus each decoded bitfield for diagnostics.
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        let mut debug_struct = f.debug_struct("KernelObjHeader");
        debug_struct.field(".0", &self.0);
        debug_struct.field("raw_type", &self.raw_type());
        debug_struct.field("size_words", &self.size_words());
        debug_struct.field("kernel_obj_type", &self.kernel_obj_type());
        debug_struct.field("name_ref", &self.name_ref());
        debug_struct.field("num_args", &self.num_args());
        debug_struct.finish()
    }
}
impl KernelObjHeader {
    // Bit layout of the header word (msb:lsb, inclusive):
    //   raw_type         3:0   — record type (must be KERNEL_OBJ_RECORD_TYPE)
    //   size_words      15:4   — total record size in 8-byte words, incl. header
    //   kernel_obj_type 23:16  — zircon object type (see KernelObjType)
    //   name_ref        39:24  — name string reference encoding
    //   num_args        43:40  — number of args in the payload
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    pub fn kernel_obj_type(&self) -> u32 {
        use ::bitfield::BitRange;
        let raw_value: u32 = self.bit_range(23, 16);
        ::bitfield::Into::into(raw_value)
    }
    pub fn name_ref(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(39, 24);
        ::bitfield::Into::into(raw_value)
    }
    pub fn num_args(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(43, 40);
        ::bitfield::Into::into(raw_value)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_kernel_obj_type(&mut self, value: u32) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(23, 16, ::bitfield::Into::<u32>::into(value));
    }
    pub fn set_name_ref(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(39, 24, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_num_args(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(43, 40, ::bitfield::Into::<u8>::into(value));
    }
    // Fresh header with only the record type set; used when writing records.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(KERNEL_OBJ_RECORD_TYPE);
        header
    }
    // Validates the raw bits: only the record type is checked here; the
    // macro-generated extra-validation closure is a no-op for this header.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != KERNEL_OBJ_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "KernelObjHeader",
                expected: KERNEL_OBJ_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    // Reads one little-endian u64 and validates it via `new`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    // Splits the payload off `buf`. `size_words` counts 8-byte words including
    // the header word already consumed, so a value of 0 is invalid and the
    // payload is (size_words - 1) * 8 bytes. Returns Incomplete if `buf` is
    // too short.
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        if self.size_words() == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
// Generic header interface: forwards to the inherent bitfield accessors.
impl crate::header::TraceHeader for KernelObjHeader {
    fn set_size_words(&mut self, n: u16) {
        // `n` is already a u16; the previous `n.try_into().unwrap()` was an
        // infallible identity conversion (clippy::useless_conversion).
        // Inherent methods take precedence, so this calls the bitfield setter.
        self.set_size_words(n);
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        // Headers are serialized as one little-endian u64 word.
        self.0.to_le_bytes()
    }
}
// Zircon kernel object kinds, decoded from the header's kernel_obj_type
// bitfield; unmapped values are preserved in `Unknown`.
pub enum KernelObjType {
    None,
    Process,
    Thread,
    Vmo,
    Channel,
    Event,
    Port,
    Interrupt,
    PciDevice,
    DebugLog,
    Socket,
    Resource,
    EventPair,
    Job,
    Vmar,
    Fifo,
    Guest,
    Vcpu,
    Timer,
    Iommu,
    Bti,
    Profile,
    Pmt,
    SuspendToken,
    Pager,
    Exception,
    Clock,
    Stream,
    Msi,
    Iob,
    Unknown(u32),
}
#[automatically_derived]
impl ::core::clone::Clone for KernelObjType {
    #[inline]
    fn clone(&self) -> KernelObjType {
        // Copy type: clone is a bitwise copy.
        let _: ::core::clone::AssertParamIsClone<u32>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for KernelObjType {}
#[automatically_derived]
impl ::core::fmt::Debug for KernelObjType {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        // Unit variants print their bare name; `Unknown` renders as a
        // one-field tuple (e.g. `Unknown(42)`), exactly like derived Debug.
        let name = match self {
            KernelObjType::None => "None",
            KernelObjType::Process => "Process",
            KernelObjType::Thread => "Thread",
            KernelObjType::Vmo => "Vmo",
            KernelObjType::Channel => "Channel",
            KernelObjType::Event => "Event",
            KernelObjType::Port => "Port",
            KernelObjType::Interrupt => "Interrupt",
            KernelObjType::PciDevice => "PciDevice",
            KernelObjType::DebugLog => "DebugLog",
            KernelObjType::Socket => "Socket",
            KernelObjType::Resource => "Resource",
            KernelObjType::EventPair => "EventPair",
            KernelObjType::Job => "Job",
            KernelObjType::Vmar => "Vmar",
            KernelObjType::Fifo => "Fifo",
            KernelObjType::Guest => "Guest",
            KernelObjType::Vcpu => "Vcpu",
            KernelObjType::Timer => "Timer",
            KernelObjType::Iommu => "Iommu",
            KernelObjType::Bti => "Bti",
            KernelObjType::Profile => "Profile",
            KernelObjType::Pmt => "Pmt",
            KernelObjType::SuspendToken => "SuspendToken",
            KernelObjType::Pager => "Pager",
            KernelObjType::Exception => "Exception",
            KernelObjType::Clock => "Clock",
            KernelObjType::Stream => "Stream",
            KernelObjType::Msi => "Msi",
            KernelObjType::Iob => "Iob",
            KernelObjType::Unknown(raw) => {
                return f.debug_tuple("Unknown").field(raw).finish();
            }
        };
        f.write_str(name)
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for KernelObjType {}
#[automatically_derived]
impl ::core::cmp::PartialEq for KernelObjType {
    #[inline]
    fn eq(&self, other: &KernelObjType) -> bool {
        // Compare discriminants first; only `Unknown` carries a payload that
        // needs a field comparison.
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (
                    KernelObjType::Unknown(__self_0),
                    KernelObjType::Unknown(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                // Same-discriminant pairs of unit variants are equal.
                _ => true,
            }
    }
}
// Maps the raw zircon ZX_OBJ_TYPE_* constants onto KernelObjType; values the
// library does not know about are preserved verbatim in `Unknown`.
impl From<u32> for KernelObjType {
    fn from(raw: u32) -> Self {
        use fuchsia_zircon_types::*;
        match raw {
            ZX_OBJ_TYPE_NONE => Self::None,
            ZX_OBJ_TYPE_PROCESS => Self::Process,
            ZX_OBJ_TYPE_THREAD => Self::Thread,
            ZX_OBJ_TYPE_VMO => Self::Vmo,
            ZX_OBJ_TYPE_CHANNEL => Self::Channel,
            ZX_OBJ_TYPE_EVENT => Self::Event,
            ZX_OBJ_TYPE_PORT => Self::Port,
            ZX_OBJ_TYPE_INTERRUPT => Self::Interrupt,
            ZX_OBJ_TYPE_PCI_DEVICE => Self::PciDevice,
            ZX_OBJ_TYPE_DEBUGLOG => Self::DebugLog,
            ZX_OBJ_TYPE_SOCKET => Self::Socket,
            ZX_OBJ_TYPE_RESOURCE => Self::Resource,
            ZX_OBJ_TYPE_EVENTPAIR => Self::EventPair,
            ZX_OBJ_TYPE_JOB => Self::Job,
            ZX_OBJ_TYPE_VMAR => Self::Vmar,
            ZX_OBJ_TYPE_FIFO => Self::Fifo,
            ZX_OBJ_TYPE_GUEST => Self::Guest,
            ZX_OBJ_TYPE_VCPU => Self::Vcpu,
            ZX_OBJ_TYPE_TIMER => Self::Timer,
            ZX_OBJ_TYPE_IOMMU => Self::Iommu,
            ZX_OBJ_TYPE_BTI => Self::Bti,
            ZX_OBJ_TYPE_PROFILE => Self::Profile,
            ZX_OBJ_TYPE_PMT => Self::Pmt,
            ZX_OBJ_TYPE_SUSPEND_TOKEN => Self::SuspendToken,
            ZX_OBJ_TYPE_PAGER => Self::Pager,
            ZX_OBJ_TYPE_EXCEPTION => Self::Exception,
            ZX_OBJ_TYPE_CLOCK => Self::Clock,
            ZX_OBJ_TYPE_STREAM => Self::Stream,
            ZX_OBJ_TYPE_MSI => Self::Msi,
            ZX_OBJ_TYPE_IOB => Self::Iob,
            // Forward-compatibility: keep unrecognized object types.
            unknown => Self::Unknown(unknown),
        }
    }
}
}
mod scheduling {
use crate::{
args::{Arg, RawArg},
error::ParseWarning, init::Ticks, session::ResolveCtx,
thread::{ProcessKoid, ProcessRef, ThreadKoid, ThreadRef},
trace_header, ParseError, ParseResult, Provider, SCHEDULING_RECORD_TYPE,
};
use nom::{combinator::all_consuming, number::complete::le_u64};
// Scheduling-record subtype discriminators carried in the header's
// record_type bitfield.
const LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE: u8 = 0;
const CONTEXT_SWITCH_SCHEDULING_TYPE: u8 = 1;
const THREAD_WAKEUP_SCHEDULING_TYPE: u8 = 2;
// A resolved scheduling record: a context switch (modern or legacy) or a
// thread wakeup.
pub enum SchedulingRecord {
    ContextSwitch(ContextSwitchEvent),
    ThreadWakeup(ThreadWakeupEvent),
    LegacyContextSwitch(LegacyContextSwitchEvent),
}
#[automatically_derived]
impl ::core::clone::Clone for SchedulingRecord {
    #[inline]
    fn clone(&self) -> SchedulingRecord {
        // Clone the matching variant's payload.
        match self {
            SchedulingRecord::ContextSwitch(__self_0) => {
                SchedulingRecord::ContextSwitch(
                    ::core::clone::Clone::clone(__self_0),
                )
            }
            SchedulingRecord::ThreadWakeup(__self_0) => {
                SchedulingRecord::ThreadWakeup(::core::clone::Clone::clone(__self_0))
            }
            SchedulingRecord::LegacyContextSwitch(__self_0) => {
                SchedulingRecord::LegacyContextSwitch(
                    ::core::clone::Clone::clone(__self_0),
                )
            }
        }
    }
}
#[automatically_derived]
impl ::core::fmt::Debug for SchedulingRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        // Each variant prints as a one-field tuple of its event payload.
        match self {
            SchedulingRecord::ContextSwitch(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "ContextSwitch",
                    &__self_0,
                )
            }
            SchedulingRecord::ThreadWakeup(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "ThreadWakeup",
                    &__self_0,
                )
            }
            SchedulingRecord::LegacyContextSwitch(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "LegacyContextSwitch",
                    &__self_0,
                )
            }
        }
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for SchedulingRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for SchedulingRecord {
    #[inline]
    fn eq(&self, other: &SchedulingRecord) -> bool {
        // Compare discriminants first, then the matching variant payloads.
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (
                    SchedulingRecord::ContextSwitch(__self_0),
                    SchedulingRecord::ContextSwitch(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    SchedulingRecord::ThreadWakeup(__self_0),
                    SchedulingRecord::ThreadWakeup(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    SchedulingRecord::LegacyContextSwitch(__self_0),
                    SchedulingRecord::LegacyContextSwitch(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                // SAFETY (derive-generated): the discriminants already
                // matched, so a mismatched-variant pair cannot occur here.
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}
impl SchedulingRecord {
    /// The incoming process, if any.
    pub fn process(&self) -> Option<ProcessKoid> {
        match self {
            Self::LegacyContextSwitch(
                LegacyContextSwitchEvent { incoming_process, .. },
            ) => Some(*incoming_process),
            // Modern context-switch and wakeup records carry no process koid.
            Self::ContextSwitch(..) | Self::ThreadWakeup(..) => None,
        }
    }
    /// The relevant thread: the incoming thread for context switches, the waking thread for wakeups.
    pub fn thread(&self) -> ThreadKoid {
        match self {
            Self::LegacyContextSwitch(
                LegacyContextSwitchEvent { incoming_thread, .. },
            ) => *incoming_thread,
            Self::ContextSwitch(ContextSwitchEvent { incoming_thread_id, .. }) => {
                *incoming_thread_id
            }
            Self::ThreadWakeup(ThreadWakeupEvent { waking_thread_id, .. }) => {
                *waking_thread_id
            }
        }
    }
    // Resolves a raw scheduling record. Unknown subtypes are dropped with a
    // warning rather than failing the whole parse.
    pub(super) fn resolve(
        ctx: &mut ResolveCtx,
        raw: RawSchedulingRecord<'_>,
    ) -> Option<Self> {
        match raw {
            RawSchedulingRecord::ContextSwitch(c) => {
                Some(Self::ContextSwitch(ContextSwitchEvent::resolve(ctx, c)))
            }
            RawSchedulingRecord::ThreadWakeup(t) => {
                Some(Self::ThreadWakeup(ThreadWakeupEvent::resolve(ctx, t)))
            }
            RawSchedulingRecord::LegacyContextSwitch(c) => {
                Some(
                    Self::LegacyContextSwitch(
                        LegacyContextSwitchEvent::resolve(ctx, c),
                    ),
                )
            }
            RawSchedulingRecord::Unknown { raw_type, .. } => {
                ctx.add_warning(ParseWarning::UnknownSchedulingRecordType(raw_type));
                None
            }
        }
    }
}
// Unresolved scheduling record; unknown subtypes keep their raw type id and
// the undecoded payload bytes so a warning can be emitted at resolve time.
pub(super) enum RawSchedulingRecord<'a> {
    ContextSwitch(RawContextSwitchEvent<'a>),
    ThreadWakeup(RawThreadWakeupEvent<'a>),
    LegacyContextSwitch(RawLegacyContextSwitchEvent),
    Unknown { raw_type: u8, bytes: &'a [u8] },
}
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawSchedulingRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            RawSchedulingRecord::ContextSwitch(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "ContextSwitch",
                    &__self_0,
                )
            }
            RawSchedulingRecord::ThreadWakeup(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "ThreadWakeup",
                    &__self_0,
                )
            }
            RawSchedulingRecord::LegacyContextSwitch(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "LegacyContextSwitch",
                    &__self_0,
                )
            }
            RawSchedulingRecord::Unknown { raw_type: __self_0, bytes: __self_1 } => {
                ::core::fmt::Formatter::debug_struct_field2_finish(
                    f,
                    "Unknown",
                    "raw_type",
                    __self_0,
                    "bytes",
                    &__self_1,
                )
            }
        }
    }
}
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawSchedulingRecord<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawSchedulingRecord<'a> {
    #[inline]
    fn eq(&self, other: &RawSchedulingRecord<'a>) -> bool {
        // Compare discriminants first, then the matching variant payloads.
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (
                    RawSchedulingRecord::ContextSwitch(__self_0),
                    RawSchedulingRecord::ContextSwitch(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    RawSchedulingRecord::ThreadWakeup(__self_0),
                    RawSchedulingRecord::ThreadWakeup(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    RawSchedulingRecord::LegacyContextSwitch(__self_0),
                    RawSchedulingRecord::LegacyContextSwitch(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    RawSchedulingRecord::Unknown {
                        raw_type: __self_0,
                        bytes: __self_1,
                    },
                    RawSchedulingRecord::Unknown {
                        raw_type: __arg1_0,
                        bytes: __arg1_1,
                    },
                ) => *__self_0 == *__arg1_0 && *__self_1 == *__arg1_1,
                // SAFETY (derive-generated): the discriminants already
                // matched, so a mismatched-variant pair cannot occur here.
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}
// 64-bit header word of a scheduling record; fields are packed bitfields of
// the inner u64 (record_type in bits 63:60 selects the scheduling subtype).
pub struct SchedulingHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for SchedulingHeader {
    #[inline]
    fn clone(&self) -> SchedulingHeader {
        // Copy type: clone is a bitwise copy of the u64.
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for SchedulingHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for SchedulingHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for SchedulingHeader {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for SchedulingHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for SchedulingHeader {
    #[inline]
    fn eq(&self, other: &SchedulingHeader) -> bool {
        self.0 == other.0
    }
}
// Bitfield plumbing: delegate bit-range reads to the inner u64.
impl<T> ::bitfield::BitRange<T> for SchedulingHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
// Bitfield plumbing: delegate bit-range writes to the inner u64.
impl<T> ::bitfield::BitRangeMut<T> for SchedulingHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
impl std::fmt::Debug for SchedulingHeader {
    // Debug shows the raw word plus each decoded bitfield for diagnostics.
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        let mut debug_struct = f.debug_struct("SchedulingHeader");
        debug_struct.field(".0", &self.0);
        debug_struct.field("raw_type", &self.raw_type());
        debug_struct.field("size_words", &self.size_words());
        debug_struct.field("record_type", &self.record_type());
        debug_struct.finish()
    }
}
impl SchedulingHeader {
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn record_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(63, 60);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_record_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(63, 60, ::bitfield::Into::<u8>::into(value));
}
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(SCHEDULING_RECORD_TYPE);
header
}
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != SCHEDULING_RECORD_TYPE {
return Err(crate::ParseError::WrongType {
context: "SchedulingHeader",
expected: SCHEDULING_RECORD_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
impl crate::header::TraceHeader for SchedulingHeader {
fn set_size_words(&mut self, n: u16) {
self.set_size_words(n.try_into().unwrap());
}
fn to_le_bytes(&self) -> [u8; 8] {
self.0.to_le_bytes()
}
}
    impl<'a> RawSchedulingRecord<'a> {
        // Parses one scheduling record: peeks at the shared header word to
        // learn the subtype (bits 60-63), then re-parses the whole buffer
        // (header included) with the matching event parser. Unrecognized
        // subtypes are captured as `Unknown` so callers can skip them
        // without failing the trace.
        pub(super) fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
            use nom::combinator::map;
            // `.1` keeps only the parsed header; the remaining-input half is
            // discarded because each arm below restarts from `buf`.
            let base_header = SchedulingHeader::parse(buf)?.1;
            match base_header.record_type() {
                LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE => {
                    map(
                        RawLegacyContextSwitchEvent::parse,
                        Self::LegacyContextSwitch,
                    )(buf)
                }
                CONTEXT_SWITCH_SCHEDULING_TYPE => {
                    map(RawContextSwitchEvent::parse, Self::ContextSwitch)(buf)
                }
                THREAD_WAKEUP_SCHEDULING_TYPE => {
                    map(RawThreadWakeupEvent::parse, Self::ThreadWakeup)(buf)
                }
                unknown => {
                    // size_words covers the full record including the header
                    // word, so the slice below also contains the header.
                    let size_bytes = base_header.size_words() as usize * 8;
                    if size_bytes <= buf.len() {
                        let (unknown_record, rem) = buf.split_at(size_bytes);
                        Ok((
                            rem,
                            Self::Unknown {
                                raw_type: unknown,
                                bytes: unknown_record,
                            },
                        ))
                    } else {
                        Err(
                            nom::Err::Incomplete(
                                nom::Needed::Size(size_bytes - buf.len()),
                            ),
                        )
                    }
                }
            }
        }
    }
    /// Fully-resolved context switch event: raw wire references have been
    /// resolved against the session's provider/string/thread tables and the
    /// tick count converted to a timestamp.
    pub struct ContextSwitchEvent {
        pub provider: Option<Provider>,
        pub cpu_id: u16,
        pub timestamp: i64,
        pub outgoing_thread_state: ThreadState,
        pub outgoing_thread_id: ThreadKoid,
        pub incoming_thread_id: ThreadKoid,
        pub args: Vec<Arg>,
    }
    #[automatically_derived]
    impl ::core::clone::Clone for ContextSwitchEvent {
        #[inline]
        fn clone(&self) -> ContextSwitchEvent {
            ContextSwitchEvent {
                provider: ::core::clone::Clone::clone(&self.provider),
                cpu_id: ::core::clone::Clone::clone(&self.cpu_id),
                timestamp: ::core::clone::Clone::clone(&self.timestamp),
                outgoing_thread_state: ::core::clone::Clone::clone(
                    &self.outgoing_thread_state,
                ),
                outgoing_thread_id: ::core::clone::Clone::clone(
                    &self.outgoing_thread_id,
                ),
                incoming_thread_id: ::core::clone::Clone::clone(
                    &self.incoming_thread_id,
                ),
                args: ::core::clone::Clone::clone(&self.args),
            }
        }
    }
    #[automatically_derived]
    impl ::core::fmt::Debug for ContextSwitchEvent {
        #[inline]
        fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
            let names: &'static _ = &[
                "provider",
                "cpu_id",
                "timestamp",
                "outgoing_thread_state",
                "outgoing_thread_id",
                "incoming_thread_id",
                "args",
            ];
            let values: &[&dyn ::core::fmt::Debug] = &[
                &self.provider,
                &self.cpu_id,
                &self.timestamp,
                &self.outgoing_thread_state,
                &self.outgoing_thread_id,
                &self.incoming_thread_id,
                &&self.args,
            ];
            ::core::fmt::Formatter::debug_struct_fields_finish(
                f,
                "ContextSwitchEvent",
                names,
                values,
            )
        }
    }
    #[automatically_derived]
    impl ::core::marker::StructuralPartialEq for ContextSwitchEvent {}
    #[automatically_derived]
    impl ::core::cmp::PartialEq for ContextSwitchEvent {
        #[inline]
        fn eq(&self, other: &ContextSwitchEvent) -> bool {
            self.provider == other.provider && self.cpu_id == other.cpu_id
                && self.timestamp == other.timestamp
                && self.outgoing_thread_state == other.outgoing_thread_state
                && self.outgoing_thread_id == other.outgoing_thread_id
                && self.incoming_thread_id == other.incoming_thread_id
                && self.args == other.args
        }
    }
    impl ContextSwitchEvent {
        // Converts a parsed wire-format event into its resolved form using
        // the current session context (provider, tick scale, string refs).
        fn resolve(ctx: &mut ResolveCtx, raw: RawContextSwitchEvent<'_>) -> Self {
            Self {
                provider: ctx.current_provider(),
                cpu_id: raw.cpu_id,
                timestamp: ctx.resolve_ticks(raw.ticks),
                outgoing_thread_state: raw.outgoing_thread_state,
                outgoing_thread_id: ThreadKoid(raw.outgoing_thread_id),
                incoming_thread_id: ThreadKoid(raw.incoming_thread_id),
                args: Arg::resolve_n(ctx, raw.args),
            }
        }
    }
    /// Wire-format context switch event: thread koids are raw u64s and the
    /// timestamp is still in provider ticks; `ContextSwitchEvent::resolve`
    /// produces the resolved form.
    pub(super) struct RawContextSwitchEvent<'a> {
        cpu_id: u16,
        ticks: Ticks,
        outgoing_thread_state: ThreadState,
        outgoing_thread_id: u64,
        incoming_thread_id: u64,
        args: Vec<RawArg<'a>>,
    }
    #[automatically_derived]
    impl<'a> ::core::fmt::Debug for RawContextSwitchEvent<'a> {
        #[inline]
        fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
            let names: &'static _ = &[
                "cpu_id",
                "ticks",
                "outgoing_thread_state",
                "outgoing_thread_id",
                "incoming_thread_id",
                "args",
            ];
            let values: &[&dyn ::core::fmt::Debug] = &[
                &self.cpu_id,
                &self.ticks,
                &self.outgoing_thread_state,
                &self.outgoing_thread_id,
                &self.incoming_thread_id,
                &&self.args,
            ];
            ::core::fmt::Formatter::debug_struct_fields_finish(
                f,
                "RawContextSwitchEvent",
                names,
                values,
            )
        }
    }
    #[automatically_derived]
    impl<'a> ::core::marker::StructuralPartialEq for RawContextSwitchEvent<'a> {}
    #[automatically_derived]
    impl<'a> ::core::cmp::PartialEq for RawContextSwitchEvent<'a> {
        #[inline]
        fn eq(&self, other: &RawContextSwitchEvent<'a>) -> bool {
            self.cpu_id == other.cpu_id && self.ticks == other.ticks
                && self.outgoing_thread_state == other.outgoing_thread_state
                && self.outgoing_thread_id == other.outgoing_thread_id
                && self.incoming_thread_id == other.incoming_thread_id
                && self.args == other.args
        }
    }
    impl<'a> RawContextSwitchEvent<'a> {
        // Parses a (non-legacy) context switch record starting at its header
        // word: header, then ticks, outgoing/incoming thread koids, and
        // `num_args` trailing arguments that must fill the payload exactly.
        fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
            let (buf, header) = ContextSwitchHeader::parse(buf)?;
            if header.record_type() != CONTEXT_SWITCH_SCHEDULING_TYPE {
                return Err(
                    nom::Err::Error(ParseError::WrongType {
                        observed: header.record_type(),
                        expected: CONTEXT_SWITCH_SCHEDULING_TYPE,
                        context: "ContextSwitchEvent",
                    }),
                );
            }
            let (rem, payload) = header.take_payload(buf)?;
            let (payload, ticks) = Ticks::parse(payload)?;
            let (payload, outgoing_thread_id) = le_u64(payload)?;
            let (payload, incoming_thread_id) = le_u64(payload)?;
            let (empty, args) = all_consuming(|p| RawArg::parse_n(
                header.num_args(),
                p,
            ))(payload)?;
            // Expanded assertion: `all_consuming` already guarantees the
            // remainder is empty, so this panic is unreachable in practice.
            if !empty.is_empty() {
                {
                    ::core::panicking::panic_fmt(
                        format_args!(
                            "all_consuming must not return any remaining buffer",
                        ),
                    );
                }
            }
            Ok((
                rem,
                Self {
                    cpu_id: header.cpu_id(),
                    outgoing_thread_state: ThreadState::parse(
                        header.outgoing_thread_state(),
                    ),
                    ticks,
                    outgoing_thread_id,
                    incoming_thread_id,
                    args,
                },
            ))
        }
    }
    /// Header word for a (non-legacy) context switch record.
    ///
    /// Bit layout (from the accessors below): 0-3 container type, 4-15 size
    /// in words, 16-19 argument count, 20-35 cpu id, 36-39 outgoing thread
    /// state, 60-63 scheduling subtype.
    pub struct ContextSwitchHeader(u64);
    #[automatically_derived]
    impl ::core::clone::Clone for ContextSwitchHeader {
        #[inline]
        fn clone(&self) -> ContextSwitchHeader {
            let _: ::core::clone::AssertParamIsClone<u64>;
            *self
        }
    }
    #[automatically_derived]
    impl ::core::marker::Copy for ContextSwitchHeader {}
    #[automatically_derived]
    impl ::core::marker::StructuralEq for ContextSwitchHeader {}
    #[automatically_derived]
    impl ::core::cmp::Eq for ContextSwitchHeader {
        #[inline]
        #[doc(hidden)]
        #[coverage(off)]
        fn assert_receiver_is_total_eq(&self) -> () {
            let _: ::core::cmp::AssertParamIsEq<u64>;
        }
    }
    #[automatically_derived]
    impl ::core::marker::StructuralPartialEq for ContextSwitchHeader {}
    #[automatically_derived]
    impl ::core::cmp::PartialEq for ContextSwitchHeader {
        #[inline]
        fn eq(&self, other: &ContextSwitchHeader) -> bool {
            self.0 == other.0
        }
    }
    // Delegate bit access to the wrapped u64 for the accessors below.
    impl<T> ::bitfield::BitRange<T> for ContextSwitchHeader
    where
        u64: ::bitfield::BitRange<T>,
    {
        fn bit_range(&self, msb: usize, lsb: usize) -> T {
            self.0.bit_range(msb, lsb)
        }
    }
    impl<T> ::bitfield::BitRangeMut<T> for ContextSwitchHeader
    where
        u64: ::bitfield::BitRangeMut<T>,
    {
        fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
            self.0.set_bit_range(msb, lsb, value);
        }
    }
    // Debug prints the raw word plus each decoded field for diagnostics.
    impl std::fmt::Debug for ContextSwitchHeader {
        fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
            let mut debug_struct = f.debug_struct("ContextSwitchHeader");
            debug_struct.field(".0", &self.0);
            debug_struct.field("raw_type", &self.raw_type());
            debug_struct.field("size_words", &self.size_words());
            debug_struct.field("num_args", &self.num_args());
            debug_struct.field("cpu_id", &self.cpu_id());
            debug_struct.field("outgoing_thread_state", &self.outgoing_thread_state());
            debug_struct.field("record_type", &self.record_type());
            debug_struct.finish()
        }
    }
    impl ContextSwitchHeader {
        // Bits 3..=0: container record type.
        pub fn raw_type(&self) -> u8 {
            use ::bitfield::BitRange;
            let raw_value: u8 = self.bit_range(3, 0);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 15..=4: record size in 8-byte words, header included.
        pub fn size_words(&self) -> u16 {
            use ::bitfield::BitRange;
            let raw_value: u16 = self.bit_range(15, 4);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 19..=16: number of trailing argument records.
        pub fn num_args(&self) -> u8 {
            use ::bitfield::BitRange;
            let raw_value: u8 = self.bit_range(19, 16);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 35..=20: logical CPU the switch occurred on.
        pub fn cpu_id(&self) -> u16 {
            use ::bitfield::BitRange;
            let raw_value: u16 = self.bit_range(35, 20);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 39..=36: raw outgoing thread state nibble, decoded later by
        // `ThreadState::parse`.
        pub fn outgoing_thread_state(&self) -> u8 {
            use ::bitfield::BitRange;
            let raw_value: u8 = self.bit_range(39, 36);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 63..=60: scheduling-record subtype.
        pub fn record_type(&self) -> u8 {
            use ::bitfield::BitRange;
            let raw_value: u8 = self.bit_range(63, 60);
            ::bitfield::Into::into(raw_value)
        }
        pub fn set_raw_type(&mut self, value: u8) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
        }
        pub fn set_size_words(&mut self, value: u16) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
        }
        pub fn set_num_args(&mut self, value: u8) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(19, 16, ::bitfield::Into::<u8>::into(value));
        }
        pub fn set_cpu_id(&mut self, value: u16) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(35, 20, ::bitfield::Into::<u16>::into(value));
        }
        pub fn set_outgoing_thread_state(&mut self, value: u8) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(39, 36, ::bitfield::Into::<u8>::into(value));
        }
        pub fn set_record_type(&mut self, value: u8) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(63, 60, ::bitfield::Into::<u8>::into(value));
        }
        // Zeroed header with only the container type set.
        #[allow(unused, unused_mut)]
        pub(crate) fn empty() -> Self {
            let mut header = Self(0);
            header.set_raw_type(SCHEDULING_RECORD_TYPE);
            header
        }
        // Validating constructor: rejects non-scheduling container types.
        fn new(bits: u64) -> Result<Self, crate::ParseError> {
            let header = Self(bits);
            if header.raw_type() != SCHEDULING_RECORD_TYPE {
                return Err(crate::ParseError::WrongType {
                    context: "ContextSwitchHeader",
                    expected: SCHEDULING_RECORD_TYPE,
                    observed: header.raw_type(),
                });
            }
            // Expanded extra-validation hook; a no-op for this header.
            let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
            res?;
            Ok(header)
        }
        // Reads one little-endian u64 and validates it via `new`.
        #[allow(unused)]
        fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
            nom::combinator::map_res(
                nom::number::streaming::le_u64,
                |h| Self::new(h),
            )(buf)
        }
        // Splits off this record's payload (size minus the header word).
        #[allow(unused)]
        fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
            if self.size_words() == 0 {
                return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
            }
            let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
            if size_bytes_without_header > buf.len() {
                let needed = size_bytes_without_header - buf.len();
                return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
            }
            let (payload, rem) = buf.split_at(size_bytes_without_header);
            Ok((rem, payload))
        }
    }
    impl crate::header::TraceHeader for ContextSwitchHeader {
        fn set_size_words(&mut self, n: u16) {
            self.set_size_words(n.try_into().unwrap());
        }
        fn to_le_bytes(&self) -> [u8; 8] {
            self.0.to_le_bytes()
        }
    }
    /// Fully-resolved thread wakeup event: the waking thread's koid is
    /// wrapped and the tick count converted to a timestamp.
    pub struct ThreadWakeupEvent {
        pub provider: Option<Provider>,
        pub timestamp: i64,
        pub cpu_id: u16,
        pub waking_thread_id: ThreadKoid,
        pub args: Vec<Arg>,
    }
    #[automatically_derived]
    impl ::core::clone::Clone for ThreadWakeupEvent {
        #[inline]
        fn clone(&self) -> ThreadWakeupEvent {
            ThreadWakeupEvent {
                provider: ::core::clone::Clone::clone(&self.provider),
                timestamp: ::core::clone::Clone::clone(&self.timestamp),
                cpu_id: ::core::clone::Clone::clone(&self.cpu_id),
                waking_thread_id: ::core::clone::Clone::clone(&self.waking_thread_id),
                args: ::core::clone::Clone::clone(&self.args),
            }
        }
    }
    #[automatically_derived]
    impl ::core::fmt::Debug for ThreadWakeupEvent {
        #[inline]
        fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
            ::core::fmt::Formatter::debug_struct_field5_finish(
                f,
                "ThreadWakeupEvent",
                "provider",
                &self.provider,
                "timestamp",
                &self.timestamp,
                "cpu_id",
                &self.cpu_id,
                "waking_thread_id",
                &self.waking_thread_id,
                "args",
                &&self.args,
            )
        }
    }
    #[automatically_derived]
    impl ::core::marker::StructuralPartialEq for ThreadWakeupEvent {}
    #[automatically_derived]
    impl ::core::cmp::PartialEq for ThreadWakeupEvent {
        #[inline]
        fn eq(&self, other: &ThreadWakeupEvent) -> bool {
            self.provider == other.provider && self.timestamp == other.timestamp
                && self.cpu_id == other.cpu_id
                && self.waking_thread_id == other.waking_thread_id
                && self.args == other.args
        }
    }
    impl ThreadWakeupEvent {
        // Converts a parsed wire-format wakeup into its resolved form using
        // the current session context.
        fn resolve(ctx: &mut ResolveCtx, raw: RawThreadWakeupEvent<'_>) -> Self {
            Self {
                provider: ctx.current_provider(),
                timestamp: ctx.resolve_ticks(raw.ticks),
                cpu_id: raw.cpu_id,
                waking_thread_id: ThreadKoid(raw.waking_thread_id),
                args: Arg::resolve_n(ctx, raw.args),
            }
        }
    }
    /// Wire-format thread wakeup event; resolved by
    /// `ThreadWakeupEvent::resolve`.
    pub(super) struct RawThreadWakeupEvent<'a> {
        ticks: Ticks,
        cpu_id: u16,
        waking_thread_id: u64,
        args: Vec<RawArg<'a>>,
    }
    #[automatically_derived]
    impl<'a> ::core::fmt::Debug for RawThreadWakeupEvent<'a> {
        #[inline]
        fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
            ::core::fmt::Formatter::debug_struct_field4_finish(
                f,
                "RawThreadWakeupEvent",
                "ticks",
                &self.ticks,
                "cpu_id",
                &self.cpu_id,
                "waking_thread_id",
                &self.waking_thread_id,
                "args",
                &&self.args,
            )
        }
    }
    #[automatically_derived]
    impl<'a> ::core::marker::StructuralPartialEq for RawThreadWakeupEvent<'a> {}
    #[automatically_derived]
    impl<'a> ::core::cmp::PartialEq for RawThreadWakeupEvent<'a> {
        #[inline]
        fn eq(&self, other: &RawThreadWakeupEvent<'a>) -> bool {
            self.ticks == other.ticks && self.cpu_id == other.cpu_id
                && self.waking_thread_id == other.waking_thread_id
                && self.args == other.args
        }
    }
    impl<'a> RawThreadWakeupEvent<'a> {
        // Parses a thread wakeup record starting at its header word: header,
        // then ticks, the waking thread koid, and `num_args` trailing
        // arguments that must fill the payload exactly.
        fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
            let (buf, header) = ThreadWakeupHeader::parse(buf)?;
            if header.record_type() != THREAD_WAKEUP_SCHEDULING_TYPE {
                return Err(
                    nom::Err::Error(ParseError::WrongType {
                        observed: header.record_type(),
                        expected: THREAD_WAKEUP_SCHEDULING_TYPE,
                        context: "ThreadWakeupEvent",
                    }),
                );
            }
            let (rem, payload) = header.take_payload(buf)?;
            let (payload, ticks) = Ticks::parse(payload)?;
            let (payload, waking_thread_id) = le_u64(payload)?;
            let (empty, args) = all_consuming(|p| RawArg::parse_n(
                header.num_args(),
                p,
            ))(payload)?;
            // Expanded assertion: unreachable because `all_consuming`
            // guarantees an empty remainder.
            if !empty.is_empty() {
                {
                    ::core::panicking::panic_fmt(
                        format_args!(
                            "all_consuming must not return any remaining buffer",
                        ),
                    );
                }
            }
            Ok((
                rem,
                Self {
                    ticks,
                    cpu_id: header.cpu_id(),
                    waking_thread_id,
                    args,
                },
            ))
        }
    }
    /// Header word for a thread wakeup record.
    ///
    /// Bit layout (from the accessors below): 0-3 container type, 4-15 size
    /// in words, 16-19 argument count, 20-35 cpu id, 60-63 scheduling
    /// subtype.
    pub struct ThreadWakeupHeader(u64);
    #[automatically_derived]
    impl ::core::clone::Clone for ThreadWakeupHeader {
        #[inline]
        fn clone(&self) -> ThreadWakeupHeader {
            let _: ::core::clone::AssertParamIsClone<u64>;
            *self
        }
    }
    #[automatically_derived]
    impl ::core::marker::Copy for ThreadWakeupHeader {}
    #[automatically_derived]
    impl ::core::marker::StructuralEq for ThreadWakeupHeader {}
    #[automatically_derived]
    impl ::core::cmp::Eq for ThreadWakeupHeader {
        #[inline]
        #[doc(hidden)]
        #[coverage(off)]
        fn assert_receiver_is_total_eq(&self) -> () {
            let _: ::core::cmp::AssertParamIsEq<u64>;
        }
    }
    #[automatically_derived]
    impl ::core::marker::StructuralPartialEq for ThreadWakeupHeader {}
    #[automatically_derived]
    impl ::core::cmp::PartialEq for ThreadWakeupHeader {
        #[inline]
        fn eq(&self, other: &ThreadWakeupHeader) -> bool {
            self.0 == other.0
        }
    }
    // Delegate bit access to the wrapped u64 for the accessors below.
    impl<T> ::bitfield::BitRange<T> for ThreadWakeupHeader
    where
        u64: ::bitfield::BitRange<T>,
    {
        fn bit_range(&self, msb: usize, lsb: usize) -> T {
            self.0.bit_range(msb, lsb)
        }
    }
    impl<T> ::bitfield::BitRangeMut<T> for ThreadWakeupHeader
    where
        u64: ::bitfield::BitRangeMut<T>,
    {
        fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
            self.0.set_bit_range(msb, lsb, value);
        }
    }
    // Debug prints the raw word plus each decoded field for diagnostics.
    impl std::fmt::Debug for ThreadWakeupHeader {
        fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
            let mut debug_struct = f.debug_struct("ThreadWakeupHeader");
            debug_struct.field(".0", &self.0);
            debug_struct.field("raw_type", &self.raw_type());
            debug_struct.field("size_words", &self.size_words());
            debug_struct.field("num_args", &self.num_args());
            debug_struct.field("cpu_id", &self.cpu_id());
            debug_struct.field("record_type", &self.record_type());
            debug_struct.finish()
        }
    }
    impl ThreadWakeupHeader {
        // Bits 3..=0: container record type.
        pub fn raw_type(&self) -> u8 {
            use ::bitfield::BitRange;
            let raw_value: u8 = self.bit_range(3, 0);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 15..=4: record size in 8-byte words, header included.
        pub fn size_words(&self) -> u16 {
            use ::bitfield::BitRange;
            let raw_value: u16 = self.bit_range(15, 4);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 19..=16: number of trailing argument records.
        pub fn num_args(&self) -> u8 {
            use ::bitfield::BitRange;
            let raw_value: u8 = self.bit_range(19, 16);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 35..=20: logical CPU the wakeup occurred on.
        pub fn cpu_id(&self) -> u16 {
            use ::bitfield::BitRange;
            let raw_value: u16 = self.bit_range(35, 20);
            ::bitfield::Into::into(raw_value)
        }
        // Bits 63..=60: scheduling-record subtype.
        pub fn record_type(&self) -> u8 {
            use ::bitfield::BitRange;
            let raw_value: u8 = self.bit_range(63, 60);
            ::bitfield::Into::into(raw_value)
        }
        pub fn set_raw_type(&mut self, value: u8) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
        }
        pub fn set_size_words(&mut self, value: u16) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
        }
        pub fn set_num_args(&mut self, value: u8) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(19, 16, ::bitfield::Into::<u8>::into(value));
        }
        pub fn set_cpu_id(&mut self, value: u16) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(35, 20, ::bitfield::Into::<u16>::into(value));
        }
        pub fn set_record_type(&mut self, value: u8) {
            use ::bitfield::BitRangeMut;
            self.set_bit_range(63, 60, ::bitfield::Into::<u8>::into(value));
        }
        // Zeroed header with only the container type set.
        #[allow(unused, unused_mut)]
        pub(crate) fn empty() -> Self {
            let mut header = Self(0);
            header.set_raw_type(SCHEDULING_RECORD_TYPE);
            header
        }
        // Validating constructor: rejects non-scheduling container types.
        fn new(bits: u64) -> Result<Self, crate::ParseError> {
            let header = Self(bits);
            if header.raw_type() != SCHEDULING_RECORD_TYPE {
                return Err(crate::ParseError::WrongType {
                    context: "ThreadWakeupHeader",
                    expected: SCHEDULING_RECORD_TYPE,
                    observed: header.raw_type(),
                });
            }
            // Expanded extra-validation hook; a no-op for this header.
            let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
            res?;
            Ok(header)
        }
        // Reads one little-endian u64 and validates it via `new`.
        #[allow(unused)]
        fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
            nom::combinator::map_res(
                nom::number::streaming::le_u64,
                |h| Self::new(h),
            )(buf)
        }
        // Splits off this record's payload (size minus the header word).
        #[allow(unused)]
        fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
            if self.size_words() == 0 {
                return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
            }
            let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
            if size_bytes_without_header > buf.len() {
                let needed = size_bytes_without_header - buf.len();
                return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
            }
            let (payload, rem) = buf.split_at(size_bytes_without_header);
            Ok((rem, payload))
        }
    }
    impl crate::header::TraceHeader for ThreadWakeupHeader {
        fn set_size_words(&mut self, n: u16) {
            self.set_size_words(n.try_into().unwrap());
        }
        fn to_le_bytes(&self) -> [u8; 8] {
            self.0.to_le_bytes()
        }
    }
    /// Fully-resolved legacy context switch event. Unlike the newer
    /// `ContextSwitchEvent`, this form carries process koids and thread
    /// priorities but no argument list.
    pub struct LegacyContextSwitchEvent {
        pub provider: Option<Provider>,
        pub timestamp: i64,
        pub cpu_id: u16,
        pub outgoing_thread_state: ThreadState,
        pub outgoing_process: ProcessKoid,
        pub outgoing_thread: ThreadKoid,
        pub outgoing_thread_priority: u8,
        pub incoming_process: ProcessKoid,
        pub incoming_thread: ThreadKoid,
        pub incoming_thread_priority: u8,
    }
    #[automatically_derived]
    impl ::core::clone::Clone for LegacyContextSwitchEvent {
        #[inline]
        fn clone(&self) -> LegacyContextSwitchEvent {
            LegacyContextSwitchEvent {
                provider: ::core::clone::Clone::clone(&self.provider),
                timestamp: ::core::clone::Clone::clone(&self.timestamp),
                cpu_id: ::core::clone::Clone::clone(&self.cpu_id),
                outgoing_thread_state: ::core::clone::Clone::clone(
                    &self.outgoing_thread_state,
                ),
                outgoing_process: ::core::clone::Clone::clone(&self.outgoing_process),
                outgoing_thread: ::core::clone::Clone::clone(&self.outgoing_thread),
                outgoing_thread_priority: ::core::clone::Clone::clone(
                    &self.outgoing_thread_priority,
                ),
                incoming_process: ::core::clone::Clone::clone(&self.incoming_process),
                incoming_thread: ::core::clone::Clone::clone(&self.incoming_thread),
                incoming_thread_priority: ::core::clone::Clone::clone(
                    &self.incoming_thread_priority,
                ),
            }
        }
    }
    #[automatically_derived]
    impl ::core::fmt::Debug for LegacyContextSwitchEvent {
        #[inline]
        fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
            let names: &'static _ = &[
                "provider",
                "timestamp",
                "cpu_id",
                "outgoing_thread_state",
                "outgoing_process",
                "outgoing_thread",
                "outgoing_thread_priority",
                "incoming_process",
                "incoming_thread",
                "incoming_thread_priority",
            ];
            let values: &[&dyn ::core::fmt::Debug] = &[
                &self.provider,
                &self.timestamp,
                &self.cpu_id,
                &self.outgoing_thread_state,
                &self.outgoing_process,
                &self.outgoing_thread,
                &self.outgoing_thread_priority,
                &self.incoming_process,
                &self.incoming_thread,
                &&self.incoming_thread_priority,
            ];
            ::core::fmt::Formatter::debug_struct_fields_finish(
                f,
                "LegacyContextSwitchEvent",
                names,
                values,
            )
        }
    }
    #[automatically_derived]
    impl ::core::marker::StructuralPartialEq for LegacyContextSwitchEvent {}
    #[automatically_derived]
    impl ::core::cmp::PartialEq for LegacyContextSwitchEvent {
        #[inline]
        fn eq(&self, other: &LegacyContextSwitchEvent) -> bool {
            self.provider == other.provider && self.timestamp == other.timestamp
                && self.cpu_id == other.cpu_id
                && self.outgoing_thread_state == other.outgoing_thread_state
                && self.outgoing_process == other.outgoing_process
                && self.outgoing_thread == other.outgoing_thread
                && self.outgoing_thread_priority == other.outgoing_thread_priority
                && self.incoming_process == other.incoming_process
                && self.incoming_thread == other.incoming_thread
                && self.incoming_thread_priority == other.incoming_thread_priority
        }
    }
    impl LegacyContextSwitchEvent {
        // Converts a parsed wire-format legacy event into its resolved form:
        // process/thread references are looked up in the session tables and
        // ticks converted to a timestamp.
        fn resolve(ctx: &mut ResolveCtx, raw: RawLegacyContextSwitchEvent) -> Self {
            Self {
                provider: ctx.current_provider(),
                timestamp: ctx.resolve_ticks(raw.ticks),
                cpu_id: raw.cpu_id,
                outgoing_thread_state: raw.outgoing_thread_state,
                outgoing_process: ctx.resolve_process(raw.outgoing_process),
                outgoing_thread: ctx.resolve_thread(raw.outgoing_thread),
                outgoing_thread_priority: raw.outgoing_thread_priority,
                incoming_process: ctx.resolve_process(raw.incoming_process),
                incoming_thread: ctx.resolve_thread(raw.incoming_thread),
                incoming_thread_priority: raw.incoming_thread_priority,
            }
        }
    }
    /// Wire-format legacy context switch event; process/thread fields are
    /// unresolved refs into the session tables.
    pub(super) struct RawLegacyContextSwitchEvent {
        ticks: Ticks,
        cpu_id: u16,
        outgoing_thread_state: ThreadState,
        outgoing_process: ProcessRef,
        outgoing_thread: ThreadRef,
        outgoing_thread_priority: u8,
        incoming_process: ProcessRef,
        incoming_thread: ThreadRef,
        incoming_thread_priority: u8,
    }
    #[automatically_derived]
    impl ::core::fmt::Debug for RawLegacyContextSwitchEvent {
        #[inline]
        fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
            let names: &'static _ = &[
                "ticks",
                "cpu_id",
                "outgoing_thread_state",
                "outgoing_process",
                "outgoing_thread",
                "outgoing_thread_priority",
                "incoming_process",
                "incoming_thread",
                "incoming_thread_priority",
            ];
            let values: &[&dyn ::core::fmt::Debug] = &[
                &self.ticks,
                &self.cpu_id,
                &self.outgoing_thread_state,
                &self.outgoing_process,
                &self.outgoing_thread,
                &self.outgoing_thread_priority,
                &self.incoming_process,
                &self.incoming_thread,
                &&self.incoming_thread_priority,
            ];
            ::core::fmt::Formatter::debug_struct_fields_finish(
                f,
                "RawLegacyContextSwitchEvent",
                names,
                values,
            )
        }
    }
    #[automatically_derived]
    impl ::core::marker::StructuralPartialEq for RawLegacyContextSwitchEvent {}
    #[automatically_derived]
    impl ::core::cmp::PartialEq for RawLegacyContextSwitchEvent {
        #[inline]
        fn eq(&self, other: &RawLegacyContextSwitchEvent) -> bool {
            self.ticks == other.ticks && self.cpu_id == other.cpu_id
                && self.outgoing_thread_state == other.outgoing_thread_state
                && self.outgoing_process == other.outgoing_process
                && self.outgoing_thread == other.outgoing_thread
                && self.outgoing_thread_priority == other.outgoing_thread_priority
                && self.incoming_process == other.incoming_process
                && self.incoming_thread == other.incoming_thread
                && self.incoming_thread_priority == other.incoming_thread_priority
        }
    }
    impl RawLegacyContextSwitchEvent {
        // Parses a legacy context switch record starting at its header word:
        // header, ticks, then outgoing process/thread refs followed by
        // incoming process/thread refs, which must fill the payload exactly.
        fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
            let (buf, header) = LegacyContextSwitchHeader::parse(buf)?;
            if header.record_type() != LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE {
                return Err(
                    nom::Err::Error(ParseError::WrongType {
                        observed: header.record_type(),
                        expected: LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE,
                        context: "LegacyContextSwitchEvent",
                    }),
                );
            }
            let outgoing_thread_state = ThreadState::parse(
                header.outgoing_thread_state(),
            );
            let (rem, payload) = header.take_payload(buf)?;
            let (payload, ticks) = Ticks::parse(payload)?;
            // NOTE(review): both the process ref and the thread ref on each
            // side are parsed with the same thread-ref index from the header
            // (`outgoing_thread()` / `incoming_thread()`) — presumably an
            // inline (zero) ref means the koids follow in the payload;
            // confirm against ProcessRef::parse/ThreadRef::parse.
            let (payload, outgoing_process) = ProcessRef::parse(
                header.outgoing_thread(),
                payload,
            )?;
            let (payload, outgoing_thread) = ThreadRef::parse(
                header.outgoing_thread(),
                payload,
            )?;
            let (payload, incoming_process) = ProcessRef::parse(
                header.incoming_thread(),
                payload,
            )?;
            let (empty, incoming_thread) = all_consuming(|p| ThreadRef::parse(
                header.incoming_thread(),
                p,
            ))(payload)?;
            // Expanded assertion: unreachable because `all_consuming`
            // guarantees an empty remainder.
            if !empty.is_empty() {
                {
                    ::core::panicking::panic_fmt(
                        format_args!(
                            "all_consuming must not return any remaining buffer",
                        ),
                    );
                }
            }
            Ok((
                rem,
                Self {
                    ticks,
                    cpu_id: header.cpu_id(),
                    outgoing_thread_priority: header.outgoing_thread_priority(),
                    incoming_thread_priority: header.incoming_thread_priority(),
                    outgoing_thread_state,
                    outgoing_process,
                    outgoing_thread,
                    incoming_process,
                    incoming_thread,
                },
            ))
        }
    }
pub struct LegacyContextSwitchHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for LegacyContextSwitchHeader {
#[inline]
fn clone(&self) -> LegacyContextSwitchHeader {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for LegacyContextSwitchHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for LegacyContextSwitchHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for LegacyContextSwitchHeader {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for LegacyContextSwitchHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for LegacyContextSwitchHeader {
#[inline]
fn eq(&self, other: &LegacyContextSwitchHeader) -> bool {
self.0 == other.0
}
}
impl<T> ::bitfield::BitRange<T> for LegacyContextSwitchHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for LegacyContextSwitchHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for LegacyContextSwitchHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("LegacyContextSwitchHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("cpu_id", &self.cpu_id());
debug_struct.field("outgoing_thread_state", &self.outgoing_thread_state());
debug_struct.field("outgoing_thread", &self.outgoing_thread());
debug_struct.field("incoming_thread", &self.incoming_thread());
debug_struct
.field("outgoing_thread_priority", &self.outgoing_thread_priority());
debug_struct
.field("incoming_thread_priority", &self.incoming_thread_priority());
debug_struct.field("record_type", &self.record_type());
debug_struct.finish()
}
}
impl LegacyContextSwitchHeader {
pub fn raw_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(3, 0);
::bitfield::Into::into(raw_value)
}
pub fn size_words(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(15, 4);
::bitfield::Into::into(raw_value)
}
pub fn cpu_id(&self) -> u16 {
use ::bitfield::BitRange;
let raw_value: u16 = self.bit_range(23, 16);
::bitfield::Into::into(raw_value)
}
pub fn outgoing_thread_state(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(27, 24);
::bitfield::Into::into(raw_value)
}
pub fn outgoing_thread(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(35, 28);
::bitfield::Into::into(raw_value)
}
pub fn incoming_thread(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(43, 36);
::bitfield::Into::into(raw_value)
}
pub fn outgoing_thread_priority(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(51, 44);
::bitfield::Into::into(raw_value)
}
pub fn incoming_thread_priority(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(59, 52);
::bitfield::Into::into(raw_value)
}
pub fn record_type(&self) -> u8 {
use ::bitfield::BitRange;
let raw_value: u8 = self.bit_range(63, 60);
::bitfield::Into::into(raw_value)
}
pub fn set_raw_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
}
pub fn set_size_words(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
}
pub fn set_cpu_id(&mut self, value: u16) {
use ::bitfield::BitRangeMut;
self.set_bit_range(23, 16, ::bitfield::Into::<u16>::into(value));
}
pub fn set_outgoing_thread_state(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(27, 24, ::bitfield::Into::<u8>::into(value));
}
pub fn set_outgoing_thread(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(35, 28, ::bitfield::Into::<u8>::into(value));
}
pub fn set_incoming_thread(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(43, 36, ::bitfield::Into::<u8>::into(value));
}
pub fn set_outgoing_thread_priority(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(51, 44, ::bitfield::Into::<u8>::into(value));
}
pub fn set_incoming_thread_priority(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(59, 52, ::bitfield::Into::<u8>::into(value));
}
pub fn set_record_type(&mut self, value: u8) {
use ::bitfield::BitRangeMut;
self.set_bit_range(63, 60, ::bitfield::Into::<u8>::into(value));
}
#[allow(unused, unused_mut)]
pub(crate) fn empty() -> Self {
let mut header = Self(0);
header.set_raw_type(SCHEDULING_RECORD_TYPE);
header
}
fn new(bits: u64) -> Result<Self, crate::ParseError> {
let header = Self(bits);
if header.raw_type() != SCHEDULING_RECORD_TYPE {
return Err(crate::ParseError::WrongType {
context: "LegacyContextSwitchHeader",
expected: SCHEDULING_RECORD_TYPE,
observed: header.raw_type(),
});
}
let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
res?;
Ok(header)
}
#[allow(unused)]
fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
nom::combinator::map_res(
nom::number::streaming::le_u64,
|h| Self::new(h),
)(buf)
}
#[allow(unused)]
fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
if self.size_words() == 0 {
return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
}
let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
if size_bytes_without_header > buf.len() {
let needed = size_bytes_without_header - buf.len();
return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
}
let (payload, rem) = buf.split_at(size_bytes_without_header);
Ok((rem, payload))
}
}
// Wires the bitfield header into the crate-wide `TraceHeader` trait so
// generic record-serialization code can size and emit it.
impl crate::header::TraceHeader for LegacyContextSwitchHeader {
    fn set_size_words(&mut self, n: u16) {
        // `n` is already a u16, so this conversion is infallible and the
        // unwrap cannot panic.
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Scheduler state of a thread carried by a context-switch record.
/// Raw values outside the known 0..=5 range are preserved verbatim in
/// `Unknown` (see `ThreadState::parse`).
pub enum ThreadState {
    New,
    Running,
    Suspended,
    Blocked,
    Dying,
    Dead,
    Unknown(u8),
}
// NOTE: this file is `cargo expand` output; the `#[automatically_derived]`
// impls below are the expansion of `#[derive(Clone, Copy, Debug, PartialEq)]`
// and must not be edited by hand.
#[automatically_derived]
impl ::core::clone::Clone for ThreadState {
    #[inline]
    fn clone(&self) -> ThreadState {
        let _: ::core::clone::AssertParamIsClone<u8>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for ThreadState {}
#[automatically_derived]
impl ::core::fmt::Debug for ThreadState {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            ThreadState::New => ::core::fmt::Formatter::write_str(f, "New"),
            ThreadState::Running => ::core::fmt::Formatter::write_str(f, "Running"),
            ThreadState::Suspended => {
                ::core::fmt::Formatter::write_str(f, "Suspended")
            }
            ThreadState::Blocked => ::core::fmt::Formatter::write_str(f, "Blocked"),
            ThreadState::Dying => ::core::fmt::Formatter::write_str(f, "Dying"),
            ThreadState::Dead => ::core::fmt::Formatter::write_str(f, "Dead"),
            ThreadState::Unknown(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Unknown",
                    &__self_0,
                )
            }
        }
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ThreadState {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ThreadState {
    #[inline]
    fn eq(&self, other: &ThreadState) -> bool {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (ThreadState::Unknown(__self_0), ThreadState::Unknown(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                _ => true,
            }
    }
}
impl ThreadState {
fn parse(raw: u8) -> Self {
match raw {
0 => Self::New,
1 => Self::Running,
2 => Self::Suspended,
3 => Self::Blocked,
4 => Self::Dying,
5 => Self::Dead,
unknown => Self::Unknown(unknown),
}
}
}
}
mod session {
use crate::{
error::ParseWarning, init::{InitRecord, Ticks},
metadata::{
MetadataRecord, Provider, ProviderEventMetadataRecord,
ProviderInfoMetadataRecord, ProviderSectionMetadataRecord,
TraceInfoMetadataRecord,
},
string::{StringRecord, StringRef},
thread::{ProcessKoid, ProcessRef, ThreadKoid, ThreadRecord, ThreadRef},
ParseError, ParsedWithOriginalBytes, RawTraceRecord, TraceRecord,
};
use flyweights::FlyStr;
use futures::{AsyncRead, AsyncReadExt, SinkExt, Stream};
use std::{collections::BTreeMap, marker::Unpin, num::{NonZeroU16, NonZeroU8}};
/// Parse an entire trace session held in memory, returning every resolved
/// record plus the non-fatal warnings accumulated while resolving them.
/// Stops at the first hard parse error.
pub fn parse_full_session<'a>(
    buf: &'a [u8],
) -> Result<(Vec<TraceRecord>, Vec<ParseWarning>), ParseError> {
    let mut parser = SessionParser::new(std::io::Cursor::new(buf));
    // Collecting into `Result<Vec<_>, _>` short-circuits on the first Err,
    // exactly like pushing in a `while let` loop would.
    let records = (&mut parser).collect::<Result<Vec<_>, _>>()?;
    Ok((records, parser.warnings().to_owned()))
}
/// Incremental parser for a trace session read from `R`.
pub struct SessionParser<R> {
    // Bytes pulled from `reader` but not yet consumed by the parser.
    buffer: Vec<u8>,
    // Byte source for the session.
    reader: R,
    // Interns the strings/threads/providers later records refer to by index.
    resolver: ResolveCtx,
    // Set once `reader` returns 0 bytes; no further reads are attempted.
    reader_is_eof: bool,
    // The magic-number record must precede every other record.
    have_seen_magic_number: bool,
    // Raw bytes of the record(s) consumed by the most recent `next()` call.
    parsed_bytes: Vec<u8>,
}
// Expanded `#[derive(Debug)]` output — do not edit by hand.
#[automatically_derived]
impl<R: ::core::fmt::Debug> ::core::fmt::Debug for SessionParser<R> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ = &[
            "buffer",
            "reader",
            "resolver",
            "reader_is_eof",
            "have_seen_magic_number",
            "parsed_bytes",
        ];
        let values: &[&dyn ::core::fmt::Debug] = &[
            &self.buffer,
            &self.reader,
            &self.resolver,
            &self.reader_is_eof,
            &self.have_seen_magic_number,
            &&self.parsed_bytes,
        ];
        ::core::fmt::Formatter::debug_struct_fields_finish(
            f,
            "SessionParser",
            names,
            values,
        )
    }
}
// Expanded `#[derive(PartialEq)]` output — do not edit by hand.
#[automatically_derived]
impl<R> ::core::marker::StructuralPartialEq for SessionParser<R> {}
#[automatically_derived]
impl<R: ::core::cmp::PartialEq> ::core::cmp::PartialEq for SessionParser<R> {
    #[inline]
    fn eq(&self, other: &SessionParser<R>) -> bool {
        self.buffer == other.buffer && self.reader == other.reader
            && self.resolver == other.resolver
            && self.reader_is_eof == other.reader_is_eof
            && self.have_seen_magic_number == other.have_seen_magic_number
            && self.parsed_bytes == other.parsed_bytes
    }
}
impl<R: std::io::Read> SessionParser<R> {
    /// Create a parser that pulls bytes from `reader` on demand.
    pub fn new(reader: R) -> Self {
        Self {
            reader,
            buffer: Vec::new(),
            parsed_bytes: Vec::new(),
            resolver: ResolveCtx::new(),
            reader_is_eof: false,
            have_seen_magic_number: false,
        }
    }
}
impl<R> SessionParser<R> {
    /// Non-fatal warnings accumulated while resolving records.
    pub fn warnings(&self) -> &[ParseWarning] {
        self.resolver.warnings()
    }
    /// Raw bytes of the record(s) consumed by the most recent pull.
    pub fn parsed_bytes(&self) -> &[u8] {
        &self.parsed_bytes
    }
    /// Try to parse and resolve one record from the buffered bytes,
    /// compacting the buffer to whatever the parser left unconsumed.
    fn parse_next(&mut self) -> ParseOutcome {
        match RawTraceRecord::parse(&self.buffer) {
            Ok((rem, ParsedWithOriginalBytes { parsed: raw_record, bytes })) => {
                self.parsed_bytes.extend(bytes);
                if raw_record.is_magic_number() {
                    self.have_seen_magic_number = true;
                } else if !self.have_seen_magic_number {
                    // The magic number must be the first record in a session.
                    return ParseOutcome::Error(ParseError::MissingMagicNumber);
                }
                let resolve_res = TraceRecord::resolve(&mut self.resolver, raw_record);
                // Shift the unconsumed suffix down to the front of the buffer.
                let unused_len = rem.len();
                let parsed_len = self.buffer.len() - unused_len;
                self.buffer.copy_within(parsed_len.., 0);
                self.buffer.truncate(unused_len);
                match resolve_res {
                    Ok(Some(resolved)) => ParseOutcome::GotRecord(resolved),
                    Ok(None) => ParseOutcome::Continue,
                    Err(e) => ParseOutcome::Error(e),
                }
            }
            Err(nom::Err::Error(e) | nom::Err::Failure(e)) => ParseOutcome::Error(e),
            Err(nom::Err::Incomplete(nom::Needed::Size(n))) => {
                ParseOutcome::NeedMoreBytes(n)
            }
            Err(nom::Err::Incomplete(nom::Needed::Unknown)) => {
                // The parser can't say how much it needs; ask for a chunk.
                ParseOutcome::NeedMoreBytes(32768)
            }
        }
    }
}
// Internal result of one `SessionParser::parse_next` attempt.
enum ParseOutcome {
    // A record was parsed and resolved; yield it to the caller.
    GotRecord(TraceRecord),
    // A record was consumed but produced nothing to yield (e.g. metadata).
    Continue,
    // A hard parse/resolve failure.
    Error(ParseError),
    // The buffer is short by at least this many bytes; refill and retry.
    NeedMoreBytes(usize),
}
impl<R: std::io::Read> Iterator for SessionParser<R> {
type Item = Result<TraceRecord, ParseError>;
fn next(&mut self) -> Option<Self::Item> {
self.parsed_bytes.clear();
loop {
match self.parse_next() {
ParseOutcome::GotRecord(r) => return Some(Ok(r)),
ParseOutcome::Error(e) => return Some(Err(e)),
ParseOutcome::Continue => continue,
ParseOutcome::NeedMoreBytes(needed) => {
{
if self.reader_is_eof {
return None;
} else {
let original_len = self.buffer.len();
self.buffer.resize(original_len + needed, 0);
let bytes_read = match self
.reader
.read(&mut self.buffer[original_len..])
{
Ok(b) => b,
Err(e) => return Some(Err(ParseError::Io(e))),
};
if bytes_read == 0 {
self.reader_is_eof = true;
}
self.buffer.truncate(original_len + bytes_read);
}
};
}
}
}
}
}
impl<R: AsyncRead + Send + Unpin + 'static> SessionParser<R> {
    /// Spawn a background task that pumps records from `reader` into the
    /// returned stream. The task resolves to the accumulated warnings once
    /// the input is exhausted or the stream's receiver is dropped.
    pub fn new_async(
        reader: R,
    ) -> (
        impl Stream<Item = Result<TraceRecord, ParseError>>,
        fuchsia_async::Task<Vec<ParseWarning>>,
    ) {
        // Bounded channel of 1: the pump stays at most one record ahead of
        // the consumer.
        let (mut send, recv) = futures::channel::mpsc::channel(1);
        let pump_task = fuchsia_async::Task::spawn(async move {
            // Constructed inline rather than via `Self::new`, which
            // requires `R: std::io::Read`.
            let mut parser = Self {
                buffer: ::alloc::vec::Vec::new(),
                reader,
                resolver: ResolveCtx::new(),
                reader_is_eof: false,
                have_seen_magic_number: false,
                parsed_bytes: ::alloc::vec::Vec::new(),
            };
            while let Some(next) = parser.next_async().await {
                // A send error means the receiver was dropped; stop pumping.
                if send.send(next).await.is_err() {
                    break;
                }
            }
            parser.warnings().to_owned()
        });
        (recv, pump_task)
    }
    /// Async twin of `Iterator::next`: parse buffered bytes, refilling from
    /// the async reader whenever the parser reports it needs more input.
    pub async fn next_async(&mut self) -> Option<Result<TraceRecord, ParseError>> {
        self.parsed_bytes.clear();
        loop {
            match self.parse_next() {
                ParseOutcome::GotRecord(r) => return Some(Ok(r)),
                ParseOutcome::Error(e) => return Some(Err(e)),
                ParseOutcome::Continue => continue,
                ParseOutcome::NeedMoreBytes(needed) => {
                    {
                        if self.reader_is_eof {
                            return None;
                        } else {
                            // Grow the buffer, read into the fresh tail,
                            // then trim to the bytes actually received.
                            let original_len = self.buffer.len();
                            self.buffer.resize(original_len + needed, 0);
                            let bytes_read = match self
                                .reader
                                .read(&mut self.buffer[original_len..])
                                .await
                            {
                                Ok(b) => b,
                                Err(e) => return Some(Err(ParseError::Io(e))),
                            };
                            if bytes_read == 0 {
                                self.reader_is_eof = true;
                            }
                            self.buffer.truncate(original_len + bytes_read);
                        }
                    };
                }
            }
        }
    }
}
/// Mutable session state used to resolve index-based references (strings,
/// threads, providers) in raw records into owned values.
pub(crate) struct ResolveCtx {
    // Tick rate from the init record; defaults to 1 until one is seen.
    ticks_per_second: u64,
    // Provider that subsequent records are attributed to, if any.
    current_provider: Option<Provider>,
    // Provider id -> provider name, from provider-info records.
    providers: BTreeMap<u32, FlyStr>,
    // String index -> interned string, from string records.
    strings: BTreeMap<NonZeroU16, FlyStr>,
    // Thread index -> (process koid, thread koid), from thread records.
    threads: BTreeMap<NonZeroU8, (ProcessKoid, ThreadKoid)>,
    // Non-fatal problems accumulated while resolving.
    warnings: Vec<ParseWarning>,
}
// Expanded `#[derive(Debug)]` output — do not edit by hand.
#[automatically_derived]
impl ::core::fmt::Debug for ResolveCtx {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ = &[
            "ticks_per_second",
            "current_provider",
            "providers",
            "strings",
            "threads",
            "warnings",
        ];
        let values: &[&dyn ::core::fmt::Debug] = &[
            &self.ticks_per_second,
            &self.current_provider,
            &self.providers,
            &self.strings,
            &self.threads,
            &&self.warnings,
        ];
        ::core::fmt::Formatter::debug_struct_fields_finish(
            f,
            "ResolveCtx",
            names,
            values,
        )
    }
}
// Expanded `#[derive(PartialEq)]` output — do not edit by hand.
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ResolveCtx {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ResolveCtx {
    #[inline]
    fn eq(&self, other: &ResolveCtx) -> bool {
        self.ticks_per_second == other.ticks_per_second
            && self.current_provider == other.current_provider
            && self.providers == other.providers && self.strings == other.strings
            && self.threads == other.threads && self.warnings == other.warnings
    }
}
impl ResolveCtx {
pub fn new() -> Self {
Self {
ticks_per_second: 1,
current_provider: None,
providers: Default::default(),
strings: Default::default(),
threads: Default::default(),
warnings: Default::default(),
}
}
pub fn add_warning(&mut self, warning: ParseWarning) {
self.warnings.push(warning);
}
pub fn warnings(&self) -> &[ParseWarning] {
&self.warnings
}
pub fn current_provider(&self) -> Option<Provider> {
self.current_provider.clone()
}
pub fn get_provider(&mut self, id: u32) -> Result<Provider, ParseError> {
let name = if let Some(name) = self.providers.get(&id).cloned() {
name
} else {
self.add_warning(ParseWarning::UnknownProviderId(id));
"<unknown>".into()
};
Ok(Provider { id, name })
}
pub fn on_metadata_record(
&mut self,
m: MetadataRecord,
) -> Result<Option<TraceRecord>, ParseError> {
Ok(
match m {
MetadataRecord::TraceInfo(TraceInfoMetadataRecord::MagicNumber) => {
None
}
MetadataRecord::ProviderInfo(
ProviderInfoMetadataRecord { provider_id, name },
) => {
self.providers.insert(provider_id, name.clone());
self
.current_provider = Some(Provider {
id: provider_id,
name: name,
});
None
}
MetadataRecord::ProviderSection(
ProviderSectionMetadataRecord { provider_id },
) => {
let new_provider = self.get_provider(provider_id)?;
self.current_provider = Some(new_provider);
None
}
MetadataRecord::ProviderEvent(
ProviderEventMetadataRecord { provider_id, event },
) => {
Some(TraceRecord::ProviderEvent {
provider: self.get_provider(provider_id)?,
event,
})
}
MetadataRecord::Unknown { raw_type } => {
self.add_warning(
ParseWarning::UnknownMetadataRecordType(raw_type),
);
None
}
},
)
}
pub fn on_init_record(&mut self, InitRecord { ticks_per_second }: InitRecord) {
self.ticks_per_second = ticks_per_second;
}
pub fn on_string_record(&mut self, s: StringRecord<'_>) {
if let Some(idx) = NonZeroU16::new(s.index) {
self.strings.insert(idx, s.value.into());
} else {
self.add_warning(ParseWarning::RecordForZeroStringId);
}
}
pub fn on_thread_record(&mut self, t: ThreadRecord) {
self.threads.insert(t.index, (t.process_koid, t.thread_koid));
}
pub fn resolve_str(&mut self, s: StringRef<'_>) -> FlyStr {
match s {
StringRef::Empty => FlyStr::default(),
StringRef::Inline(inline) => FlyStr::from(inline),
StringRef::Index(id) => {
if let Some(s) = self.strings.get(&id).cloned() {
s
} else {
self.add_warning(ParseWarning::UnknownStringId(id));
"<unknown>".into()
}
}
}
}
pub fn resolve_process(&mut self, p: ProcessRef) -> ProcessKoid {
match p {
ProcessRef::Index(id) => {
if let Some(process) = self
.threads
.get(&id)
.map(|(process, _thread)| *process)
{
process
} else {
self.add_warning(ParseWarning::UnknownProcessRef(p));
ProcessKoid(std::u64::MAX)
}
}
ProcessRef::Inline(inline) => inline,
}
}
pub fn resolve_thread(&mut self, t: ThreadRef) -> ThreadKoid {
match t {
ThreadRef::Index(id) => {
if let Some(thread) = self
.threads
.get(&id)
.map(|(_process, thread)| *thread)
{
thread
} else {
self.warnings.push(ParseWarning::UnknownThreadRef(t));
ThreadKoid(std::u64::MAX)
}
}
ThreadRef::Inline(inline) => inline,
}
}
pub fn resolve_ticks(&self, t: Ticks) -> i64 {
t.scale(self.ticks_per_second)
}
}
}
mod string {
use crate::{
take_n_padded, trace_header, ParseError, ParseResult, STRING_RECORD_TYPE,
};
use nom::combinator::all_consuming;
use std::num::NonZeroU16;
// Bit 15 of a string ref distinguishes inline strings (bit set; low bits
// are the byte length) from table indices (bit clear).
pub(crate) const STRING_REF_INLINE_BIT: u16 = 1 << 15;
/// A reference to a string in a record: absent, a string-table index, or
/// the string itself inlined in the record payload.
pub enum StringRef<'a> {
    Empty,
    Index(NonZeroU16),
    Inline(&'a str),
}
// Expanded `#[derive(Clone, Debug, PartialEq)]` output — do not edit by hand.
#[automatically_derived]
impl<'a> ::core::clone::Clone for StringRef<'a> {
    #[inline]
    fn clone(&self) -> StringRef<'a> {
        match self {
            StringRef::Empty => StringRef::Empty,
            StringRef::Index(__self_0) => {
                StringRef::Index(::core::clone::Clone::clone(__self_0))
            }
            StringRef::Inline(__self_0) => {
                StringRef::Inline(::core::clone::Clone::clone(__self_0))
            }
        }
    }
}
#[automatically_derived]
impl<'a> ::core::fmt::Debug for StringRef<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            StringRef::Empty => ::core::fmt::Formatter::write_str(f, "Empty"),
            StringRef::Index(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Index",
                    &__self_0,
                )
            }
            StringRef::Inline(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Inline",
                    &__self_0,
                )
            }
        }
    }
}
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for StringRef<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for StringRef<'a> {
    #[inline]
    fn eq(&self, other: &StringRef<'a>) -> bool {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (StringRef::Index(__self_0), StringRef::Index(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (StringRef::Inline(__self_0), StringRef::Inline(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                _ => true,
            }
    }
}
impl<'a> StringRef<'a> {
    /// Decode a 16-bit string ref. Zero means the empty string; otherwise
    /// `STRING_REF_INLINE_BIT` (bit 15) selects between a string-table
    /// index (clear) and an inline string whose length occupies the low
    /// bits (set), in which case the padded bytes follow in `buf`.
    pub(crate) fn parse(str_ref: u16, buf: &'a [u8]) -> ParseResult<'a, Self> {
        if let Some(nonzero) = NonZeroU16::new(str_ref) {
            // Use the declared constant rather than a hand-rolled
            // `(x >> 15) & 1` so the test and the mask below can't drift.
            if nonzero.get() & STRING_REF_INLINE_BIT == 0 {
                Ok((buf, StringRef::Index(nonzero)))
            } else {
                let length = str_ref & !STRING_REF_INLINE_BIT;
                let (buf, inline) = parse_padded_string(length as usize, buf)?;
                Ok((buf, StringRef::Inline(inline)))
            }
        } else {
            Ok((buf, StringRef::Empty))
        }
    }
}
/// A parsed string record: a table index and the string it binds.
pub(super) struct StringRecord<'a> {
    /// Index should not be 0 but we can't use NonZeroU16 according to the spec:
    ///
    /// > String records that contain empty strings must be tolerated but they're pointless since
    /// > the empty string can simply be encoded as zero in a string ref.
    pub index: u16,
    // The string payload, decoded from the record body.
    pub value: &'a str,
}
// Expanded `#[derive(Debug, PartialEq)]` output — do not edit by hand.
#[automatically_derived]
impl<'a> ::core::fmt::Debug for StringRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field2_finish(
            f,
            "StringRecord",
            "index",
            &self.index,
            "value",
            &&self.value,
        )
    }
}
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for StringRecord<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for StringRecord<'a> {
    #[inline]
    fn eq(&self, other: &StringRecord<'a>) -> bool {
        self.index == other.index && self.value == other.value
    }
}
impl<'a> StringRecord<'a> {
    /// Parse one string record: a header word followed by a padded UTF-8
    /// payload of `string_len` bytes.
    pub(super) fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        let (buf, header) = StringHeader::parse(buf)?;
        let (rem, payload) = header.take_payload(buf)?;
        let len = header.string_len() as usize;
        let (empty, value) = all_consuming(|p| parse_padded_string(len, p))(payload)?;
        // `all_consuming` promises an empty remainder; anything else is a bug.
        assert!(
            empty.is_empty(),
            "all_consuming must not return any remaining buffer",
        );
        Ok((
            rem,
            Self {
                index: header.string_index(),
                value,
            },
        ))
    }
}
/// One-word header for a string record. Bit layout (from the accessors
/// below): bits 0-3 record type, 4-15 size in 8-byte words, 16-30 string
/// index, 32-46 unpadded string length in bytes.
pub struct StringHeader(u64);
// Expanded `#[derive(Clone, Copy, Eq, PartialEq)]` output — do not edit by hand.
#[automatically_derived]
impl ::core::clone::Clone for StringHeader {
    #[inline]
    fn clone(&self) -> StringHeader {
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for StringHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for StringHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for StringHeader {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for StringHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for StringHeader {
    #[inline]
    fn eq(&self, other: &StringHeader) -> bool {
        self.0 == other.0
    }
}
// Bridge the `bitfield` crate's accessor traits to the inner u64 so the
// generated getters/setters below can operate on `self` directly.
impl<T> ::bitfield::BitRange<T> for StringHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
impl<T> ::bitfield::BitRangeMut<T> for StringHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
// Hand-shaped Debug that shows both the raw word and each decoded field.
impl std::fmt::Debug for StringHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        let mut debug_struct = f.debug_struct("StringHeader");
        debug_struct.field(".0", &self.0);
        debug_struct.field("raw_type", &self.raw_type());
        debug_struct.field("size_words", &self.size_words());
        debug_struct.field("string_index", &self.string_index());
        debug_struct.field("string_len", &self.string_len());
        debug_struct.finish()
    }
}
// Accessors expanded from the crate's `trace_header!`/bitfield macro —
// NOTE(review): inferred from the identical shape of sibling headers;
// confirm against the macro definition before editing.
impl StringHeader {
    // Record type tag, bits 0-3.
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    // Total record size in 8-byte words (header included), bits 4-15.
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    // String-table index, bits 16-30.
    pub fn string_index(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(30, 16);
        ::bitfield::Into::into(raw_value)
    }
    // Unpadded string length in bytes, bits 32-46.
    pub fn string_len(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(46, 32);
        ::bitfield::Into::into(raw_value)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_string_index(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(30, 16, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_string_len(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(46, 32, ::bitfield::Into::<u16>::into(value));
    }
    // An all-zero header with only the type tag set.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(STRING_RECORD_TYPE);
        header
    }
    // Validate the type tag before accepting raw bits as a header.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != STRING_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "StringHeader",
                expected: STRING_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        // Expansion of an (empty) extra-validation hook.
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    // Split off this record's payload (size minus the one header word).
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        if self.size_words() == 0 {
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
// Wires the header into the crate-wide `TraceHeader` trait.
impl crate::header::TraceHeader for StringHeader {
    fn set_size_words(&mut self, n: u16) {
        // `n` is already a u16; the conversion is infallible.
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// Consume `unpadded_len` bytes (plus their trailing alignment padding)
/// from `buf` and validate them as UTF-8.
pub(crate) fn parse_padded_string<'a>(
    unpadded_len: usize,
    buf: &'a [u8],
) -> ParseResult<'a, &'a str> {
    let (rem, bytes) = take_n_padded(unpadded_len, buf)?;
    match std::str::from_utf8(bytes) {
        Ok(value) => Ok((rem, value)),
        // Bad UTF-8 is unrecoverable for this record: fail hard.
        Err(e) => Err(nom::Err::Failure(ParseError::InvalidUtf8(e))),
    }
}
}
mod thread {
use crate::{trace_header, ParseError, ParseResult, THREAD_RECORD_TYPE};
use nom::{combinator::all_consuming, number::complete::le_u64};
use std::num::NonZeroU8;
/// Kernel object id identifying a process.
pub struct ProcessKoid(pub u64);
// Expanded `#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]`
// output — do not edit by hand.
#[automatically_derived]
impl ::core::clone::Clone for ProcessKoid {
    #[inline]
    fn clone(&self) -> ProcessKoid {
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for ProcessKoid {}
#[automatically_derived]
impl ::core::fmt::Debug for ProcessKoid {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_tuple_field1_finish(f, "ProcessKoid", &&self.0)
    }
}
#[automatically_derived]
impl ::core::marker::StructuralEq for ProcessKoid {}
#[automatically_derived]
impl ::core::cmp::Eq for ProcessKoid {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::hash::Hash for ProcessKoid {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        ::core::hash::Hash::hash(&self.0, state)
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProcessKoid {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProcessKoid {
    #[inline]
    fn eq(&self, other: &ProcessKoid) -> bool {
        self.0 == other.0
    }
}
#[automatically_derived]
impl ::core::cmp::PartialOrd for ProcessKoid {
    #[inline]
    fn partial_cmp(
        &self,
        other: &ProcessKoid,
    ) -> ::core::option::Option<::core::cmp::Ordering> {
        ::core::cmp::PartialOrd::partial_cmp(&self.0, &other.0)
    }
}
#[automatically_derived]
impl ::core::cmp::Ord for ProcessKoid {
    #[inline]
    fn cmp(&self, other: &ProcessKoid) -> ::core::cmp::Ordering {
        ::core::cmp::Ord::cmp(&self.0, &other.0)
    }
}
// Convenience conversion from a raw koid value.
impl From<u64> for ProcessKoid {
    fn from(n: u64) -> Self {
        Self(n)
    }
}
// Allow comparing directly against a raw koid value.
impl PartialEq<u64> for ProcessKoid {
    fn eq(&self, other: &u64) -> bool {
        self.0.eq(other)
    }
}
// Display as the bare number (expanded from `write!(f, "{}", self.0)`).
impl std::fmt::Display for ProcessKoid {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("{0}", self.0))
    }
}
/// A reference to a process in a record: either an index into the thread
/// table or a koid inlined in the record payload.
pub enum ProcessRef {
    Index(NonZeroU8),
    Inline(ProcessKoid),
}
// Expanded `#[derive(Clone, Debug, PartialEq)]` output — do not edit by hand.
#[automatically_derived]
impl ::core::clone::Clone for ProcessRef {
    #[inline]
    fn clone(&self) -> ProcessRef {
        match self {
            ProcessRef::Index(__self_0) => {
                ProcessRef::Index(::core::clone::Clone::clone(__self_0))
            }
            ProcessRef::Inline(__self_0) => {
                ProcessRef::Inline(::core::clone::Clone::clone(__self_0))
            }
        }
    }
}
#[automatically_derived]
impl ::core::fmt::Debug for ProcessRef {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            ProcessRef::Index(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Index",
                    &__self_0,
                )
            }
            ProcessRef::Inline(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Inline",
                    &__self_0,
                )
            }
        }
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ProcessRef {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ProcessRef {
    #[inline]
    fn eq(&self, other: &ProcessRef) -> bool {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (ProcessRef::Index(__self_0), ProcessRef::Index(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (ProcessRef::Inline(__self_0), ProcessRef::Inline(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                // SAFETY (derive-generated): the tags were checked equal
                // above, so the two patterns above are exhaustive here.
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}
impl ProcessRef {
    /// Decode a process ref byte: zero means the koid follows inline as a
    /// little-endian u64; any nonzero value is a thread-table index.
    pub(crate) fn parse<'a>(thread_ref: u8, buf: &'a [u8]) -> ParseResult<'a, Self> {
        match NonZeroU8::new(thread_ref) {
            Some(index) => Ok((buf, Self::Index(index))),
            None => {
                let (buf, koid) = le_u64(buf)?;
                Ok((buf, Self::Inline(ProcessKoid(koid))))
            }
        }
    }
}
/// Kernel object id identifying a thread.
pub struct ThreadKoid(pub u64);
// Expanded `#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]`
// output — do not edit by hand.
#[automatically_derived]
impl ::core::clone::Clone for ThreadKoid {
    #[inline]
    fn clone(&self) -> ThreadKoid {
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for ThreadKoid {}
#[automatically_derived]
impl ::core::fmt::Debug for ThreadKoid {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_tuple_field1_finish(f, "ThreadKoid", &&self.0)
    }
}
#[automatically_derived]
impl ::core::marker::StructuralEq for ThreadKoid {}
#[automatically_derived]
impl ::core::cmp::Eq for ThreadKoid {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::hash::Hash for ThreadKoid {
    #[inline]
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        ::core::hash::Hash::hash(&self.0, state)
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ThreadKoid {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ThreadKoid {
    #[inline]
    fn eq(&self, other: &ThreadKoid) -> bool {
        self.0 == other.0
    }
}
#[automatically_derived]
impl ::core::cmp::PartialOrd for ThreadKoid {
    #[inline]
    fn partial_cmp(
        &self,
        other: &ThreadKoid,
    ) -> ::core::option::Option<::core::cmp::Ordering> {
        ::core::cmp::PartialOrd::partial_cmp(&self.0, &other.0)
    }
}
#[automatically_derived]
impl ::core::cmp::Ord for ThreadKoid {
    #[inline]
    fn cmp(&self, other: &ThreadKoid) -> ::core::cmp::Ordering {
        ::core::cmp::Ord::cmp(&self.0, &other.0)
    }
}
// Convenience conversion from a raw koid value.
impl From<u64> for ThreadKoid {
    fn from(n: u64) -> Self {
        Self(n)
    }
}
// Allow comparing directly against a raw koid value.
impl PartialEq<u64> for ThreadKoid {
    fn eq(&self, other: &u64) -> bool {
        self.0.eq(other)
    }
}
// Display as the bare number (expanded from `write!(f, "{}", self.0)`).
impl std::fmt::Display for ThreadKoid {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("{0}", self.0))
    }
}
/// A reference to a thread in a record: either an index into the thread
/// table or a koid inlined in the record payload.
pub enum ThreadRef {
    Index(NonZeroU8),
    Inline(ThreadKoid),
}
// Expanded `#[derive(Clone, Debug, PartialEq)]` output — do not edit by hand.
#[automatically_derived]
impl ::core::clone::Clone for ThreadRef {
    #[inline]
    fn clone(&self) -> ThreadRef {
        match self {
            ThreadRef::Index(__self_0) => {
                ThreadRef::Index(::core::clone::Clone::clone(__self_0))
            }
            ThreadRef::Inline(__self_0) => {
                ThreadRef::Inline(::core::clone::Clone::clone(__self_0))
            }
        }
    }
}
#[automatically_derived]
impl ::core::fmt::Debug for ThreadRef {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            ThreadRef::Index(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Index",
                    &__self_0,
                )
            }
            ThreadRef::Inline(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Inline",
                    &__self_0,
                )
            }
        }
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ThreadRef {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ThreadRef {
    #[inline]
    fn eq(&self, other: &ThreadRef) -> bool {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (ThreadRef::Index(__self_0), ThreadRef::Index(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (ThreadRef::Inline(__self_0), ThreadRef::Inline(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                // SAFETY (derive-generated): the tags were checked equal
                // above, so the two patterns above are exhaustive here.
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}
impl ThreadRef {
    /// Decode a thread ref byte: zero means the koid follows inline as a
    /// little-endian u64; any nonzero value is a thread-table index.
    pub(crate) fn parse<'a>(thread_ref: u8, buf: &'a [u8]) -> ParseResult<'a, Self> {
        match NonZeroU8::new(thread_ref) {
            Some(index) => Ok((buf, Self::Index(index))),
            None => {
                let (buf, koid) = le_u64(buf)?;
                Ok((buf, Self::Inline(ThreadKoid(koid))))
            }
        }
    }
}
/// A parsed thread record binding a table index to a (process, thread)
/// koid pair.
pub(crate) struct ThreadRecord {
    // Table slot this record fills; zero is invalid (rejected in `parse`).
    pub index: NonZeroU8,
    pub process_koid: ProcessKoid,
    pub thread_koid: ThreadKoid,
}
// Expanded `#[derive(Debug, PartialEq)]` output — do not edit by hand.
#[automatically_derived]
impl ::core::fmt::Debug for ThreadRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field3_finish(
            f,
            "ThreadRecord",
            "index",
            &self.index,
            "process_koid",
            &self.process_koid,
            "thread_koid",
            &&self.thread_koid,
        )
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ThreadRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ThreadRecord {
    #[inline]
    fn eq(&self, other: &ThreadRecord) -> bool {
        self.index == other.index && self.process_koid == other.process_koid
            && self.thread_koid == other.thread_koid
    }
}
impl ThreadRecord {
    /// Parse one thread record: a header word, then the process koid and
    /// thread koid as two little-endian u64 payload words.
    pub(super) fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
        let (buf, header) = ThreadHeader::parse(buf)?;
        let (rem, payload) = header.take_payload(buf)?;
        let (payload, process_koid) =
            nom::combinator::map(le_u64, ProcessKoid)(payload)?;
        let (empty, thread_koid) =
            all_consuming(nom::combinator::map(le_u64, ThreadKoid))(payload)?;
        // `all_consuming` promises an empty remainder; anything else is a bug.
        assert!(
            empty.is_empty(),
            "all_consuming must not return any remaining buffer",
        );
        // Index zero is reserved and not a valid table slot.
        let index = NonZeroU8::new(header.thread_index())
            .ok_or(nom::Err::Error(ParseError::InvalidRef))?;
        Ok((
            rem,
            Self {
                index,
                process_koid,
                thread_koid,
            },
        ))
    }
}
pub struct ThreadHeader(u64);
#[automatically_derived]
impl ::core::clone::Clone for ThreadHeader {
#[inline]
fn clone(&self) -> ThreadHeader {
let _: ::core::clone::AssertParamIsClone<u64>;
*self
}
}
#[automatically_derived]
impl ::core::marker::Copy for ThreadHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for ThreadHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for ThreadHeader {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_receiver_is_total_eq(&self) -> () {
let _: ::core::cmp::AssertParamIsEq<u64>;
}
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for ThreadHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for ThreadHeader {
#[inline]
fn eq(&self, other: &ThreadHeader) -> bool {
self.0 == other.0
}
}
impl<T> ::bitfield::BitRange<T> for ThreadHeader
where
u64: ::bitfield::BitRange<T>,
{
fn bit_range(&self, msb: usize, lsb: usize) -> T {
self.0.bit_range(msb, lsb)
}
}
impl<T> ::bitfield::BitRangeMut<T> for ThreadHeader
where
u64: ::bitfield::BitRangeMut<T>,
{
fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
self.0.set_bit_range(msb, lsb, value);
}
}
impl std::fmt::Debug for ThreadHeader {
fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
let mut debug_struct = f.debug_struct("ThreadHeader");
debug_struct.field(".0", &self.0);
debug_struct.field("raw_type", &self.raw_type());
debug_struct.field("size_words", &self.size_words());
debug_struct.field("thread_index", &self.thread_index());
debug_struct.finish()
}
}
/// Accessors for the `ThreadHeader` bitfield layout:
/// bits 3..0 = record type, bits 15..4 = size in words, bits 23..16 = thread index.
impl ThreadHeader {
    /// Record-type discriminant stored in the low four bits.
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    /// Total record size in 8-byte words, including this header word.
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    /// Index assigned to the (process, thread) pair by this thread record.
    pub fn thread_index(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(23, 16);
        ::bitfield::Into::into(raw_value)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    pub fn set_thread_index(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(23, 16, ::bitfield::Into::<u8>::into(value));
    }
    /// A zeroed header with only the record type pre-set.
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header.set_raw_type(THREAD_RECORD_TYPE);
        header
    }
    /// Wraps `bits`, rejecting headers whose type field is not a thread record.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        if header.raw_type() != THREAD_RECORD_TYPE {
            return Err(crate::ParseError::WrongType {
                context: "ThreadHeader",
                expected: THREAD_RECORD_TYPE,
                observed: header.raw_type(),
            });
        }
        // Generated extension point: a no-op validation closure here.
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    /// Parses one little-endian u64 header word from `buf`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(
            nom::number::streaming::le_u64,
            |h| Self::new(h),
        )(buf)
    }
    /// Splits off this record's payload (size minus the one header word),
    /// returning `Incomplete` with the missing byte count if `buf` is short.
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        if self.size_words() == 0 {
            // A zero-sized record cannot even contain its own header.
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
/// Generic header interface used when (re-)serializing records.
impl crate::header::TraceHeader for ThreadHeader {
    fn set_size_words(&mut self, n: u16) {
        // `try_into` on u16 -> u16 is infallible; kept as generated.
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
}
pub use args::{Arg, ArgValue, RawArg, RawArgValue};
pub use blob::{BlobRecord, BlobType, LargeBlobMetadata, LargeBlobRecord};
pub use error::{ParseError, ParseWarning};
pub use event::{symbolize, EventPayload, EventRecord, RawEventRecord};
pub use log::LogRecord;
pub use metadata::{Provider, ProviderEvent};
pub use objects::{KernelObjRecord, UserspaceObjRecord};
pub use scheduling::{
ContextSwitchEvent, LegacyContextSwitchEvent, SchedulingRecord, ThreadState,
ThreadWakeupEvent,
};
pub use session::{parse_full_session, SessionParser};
pub use string::StringRef;
pub use thread::{ProcessKoid, ThreadKoid};
use crate::{
blob::{RawBlobRecord, RawLargeBlobRecord},
error::ParseResult, init::InitRecord, log::RawLogRecord,
metadata::{MetadataRecord, TraceInfoMetadataRecord},
objects::{RawKernelObjRecord, RawUserspaceObjRecord},
scheduling::RawSchedulingRecord, session::ResolveCtx, string::StringRecord,
thread::ThreadRecord,
};
/// A fully resolved trace record: raw string/thread references have been
/// replaced by owned values via `ResolveCtx` (see `TraceRecord::resolve`).
pub enum TraceRecord {
    Event(EventRecord),
    Blob(BlobRecord),
    UserspaceObj(UserspaceObjRecord),
    KernelObj(KernelObjRecord),
    Scheduling(SchedulingRecord),
    Log(LogRecord),
    LargeBlob(LargeBlobRecord),
    /// An event emitted by the trace provider itself, paired with its provider.
    ProviderEvent { provider: Provider, event: ProviderEvent },
}
// Derive expansion: per-variant structural clone.
#[automatically_derived]
impl ::core::clone::Clone for TraceRecord {
    #[inline]
    fn clone(&self) -> TraceRecord {
        match self {
            TraceRecord::Event(__self_0) => {
                TraceRecord::Event(::core::clone::Clone::clone(__self_0))
            }
            TraceRecord::Blob(__self_0) => {
                TraceRecord::Blob(::core::clone::Clone::clone(__self_0))
            }
            TraceRecord::UserspaceObj(__self_0) => {
                TraceRecord::UserspaceObj(::core::clone::Clone::clone(__self_0))
            }
            TraceRecord::KernelObj(__self_0) => {
                TraceRecord::KernelObj(::core::clone::Clone::clone(__self_0))
            }
            TraceRecord::Scheduling(__self_0) => {
                TraceRecord::Scheduling(::core::clone::Clone::clone(__self_0))
            }
            TraceRecord::Log(__self_0) => {
                TraceRecord::Log(::core::clone::Clone::clone(__self_0))
            }
            TraceRecord::LargeBlob(__self_0) => {
                TraceRecord::LargeBlob(::core::clone::Clone::clone(__self_0))
            }
            TraceRecord::ProviderEvent { provider: __self_0, event: __self_1 } => {
                TraceRecord::ProviderEvent {
                    provider: ::core::clone::Clone::clone(__self_0),
                    event: ::core::clone::Clone::clone(__self_1),
                }
            }
        }
    }
}
// Derive expansion: per-variant Debug formatting via the compiler's
// internal `Formatter::debug_*_finish` helpers.
#[automatically_derived]
impl ::core::fmt::Debug for TraceRecord {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            TraceRecord::Event(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Event", &__self_0)
            }
            TraceRecord::Blob(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Blob", &__self_0)
            }
            TraceRecord::UserspaceObj(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UserspaceObj",
                    &__self_0,
                )
            }
            TraceRecord::KernelObj(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "KernelObj",
                    &__self_0,
                )
            }
            TraceRecord::Scheduling(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Scheduling",
                    &__self_0,
                )
            }
            TraceRecord::Log(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Log", &__self_0)
            }
            TraceRecord::LargeBlob(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "LargeBlob",
                    &__self_0,
                )
            }
            TraceRecord::ProviderEvent { provider: __self_0, event: __self_1 } => {
                ::core::fmt::Formatter::debug_struct_field2_finish(
                    f,
                    "ProviderEvent",
                    "provider",
                    __self_0,
                    "event",
                    &__self_1,
                )
            }
        }
    }
}
// Derive expansion: compare discriminants first, then the payloads of the
// matching variant pair.
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for TraceRecord {}
#[automatically_derived]
impl ::core::cmp::PartialEq for TraceRecord {
    #[inline]
    fn eq(&self, other: &TraceRecord) -> bool {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (TraceRecord::Event(__self_0), TraceRecord::Event(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (TraceRecord::Blob(__self_0), TraceRecord::Blob(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (
                    TraceRecord::UserspaceObj(__self_0),
                    TraceRecord::UserspaceObj(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (TraceRecord::KernelObj(__self_0), TraceRecord::KernelObj(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (
                    TraceRecord::Scheduling(__self_0),
                    TraceRecord::Scheduling(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (TraceRecord::Log(__self_0), TraceRecord::Log(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (TraceRecord::LargeBlob(__self_0), TraceRecord::LargeBlob(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (
                    TraceRecord::ProviderEvent { provider: __self_0, event: __self_1 },
                    TraceRecord::ProviderEvent { provider: __arg1_0, event: __arg1_1 },
                ) => *__self_0 == *__arg1_0 && *__self_1 == *__arg1_1,
                // SAFETY: the discriminants were already checked equal above, so a
                // mismatched-variant pair can never reach this arm.
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}
impl TraceRecord {
    /// The process this record is associated with, if any.
    ///
    /// Event/Log/UserspaceObj records carry the koid inline; scheduling and
    /// kernel-object records may or may not have one; blob and provider-event
    /// records never do.
    pub fn process(&self) -> Option<ProcessKoid> {
        match self {
            Self::Event(EventRecord { process, .. })
            | Self::Log(LogRecord { process, .. })
            | Self::UserspaceObj(UserspaceObjRecord { process, .. }) => Some(*process),
            Self::Scheduling(s) => s.process(),
            Self::KernelObj(k) => k.process(),
            Self::Blob(..) | Self::LargeBlob(..) | Self::ProviderEvent { .. } => None,
        }
    }
    /// The thread this record is associated with, if any.
    pub fn thread(&self) -> Option<ThreadKoid> {
        match self {
            Self::Event(EventRecord { thread, .. })
            | Self::Log(LogRecord { thread, .. }) => Some(*thread),
            Self::Scheduling(s) => Some(s.thread()),
            Self::Blob(..)
            | Self::KernelObj(..)
            | Self::LargeBlob(..)
            | Self::ProviderEvent { .. }
            | Self::UserspaceObj(..) => None,
        }
    }
    /// Resolves a raw record against the session context.
    ///
    /// Returns `Ok(None)` for records that only update `ctx` state
    /// (metadata, init, string-table, and thread-table records) or that fail
    /// to resolve (unknown types are recorded as warnings; scheduling and
    /// large-blob resolution may itself yield `None`).
    fn resolve(
        ctx: &mut ResolveCtx,
        raw: RawTraceRecord<'_>,
    ) -> Result<Option<Self>, ParseError> {
        Ok(
            match raw {
                RawTraceRecord::Unknown { raw_type } => {
                    ctx.add_warning(ParseWarning::UnknownTraceRecordType(raw_type));
                    None
                }
                RawTraceRecord::Metadata(m) => ctx.on_metadata_record(m)?,
                RawTraceRecord::Init(i) => {
                    ctx.on_init_record(i);
                    None
                }
                RawTraceRecord::String(s) => {
                    ctx.on_string_record(s);
                    None
                }
                RawTraceRecord::Thread(t) => {
                    ctx.on_thread_record(t);
                    None
                }
                RawTraceRecord::Event(e) => {
                    Some(Self::Event(EventRecord::resolve(ctx, e)))
                }
                RawTraceRecord::Blob(b) => Some(Self::Blob(BlobRecord::resolve(ctx, b))),
                RawTraceRecord::UserspaceObj(u) => {
                    Some(Self::UserspaceObj(UserspaceObjRecord::resolve(ctx, u)))
                }
                RawTraceRecord::KernelObj(k) => {
                    Some(Self::KernelObj(KernelObjRecord::resolve(ctx, k)))
                }
                RawTraceRecord::Scheduling(s) => {
                    SchedulingRecord::resolve(ctx, s).map(Self::Scheduling)
                }
                RawTraceRecord::Log(l) => Some(Self::Log(LogRecord::resolve(ctx, l))),
                RawTraceRecord::LargeBlob(lb) => {
                    LargeBlobRecord::resolve(ctx, lb).map(Self::LargeBlob)
                }
            },
        )
    }
}
/// A parsed-but-unresolved record, borrowing from the input buffer; string
/// and thread references are resolved into a `TraceRecord` later.
enum RawTraceRecord<'a> {
    Metadata(MetadataRecord),
    Init(InitRecord),
    String(StringRecord<'a>),
    Thread(ThreadRecord),
    Event(RawEventRecord<'a>),
    Blob(RawBlobRecord<'a>),
    UserspaceObj(RawUserspaceObjRecord<'a>),
    KernelObj(RawKernelObjRecord<'a>),
    Scheduling(RawSchedulingRecord<'a>),
    Log(RawLogRecord<'a>),
    LargeBlob(RawLargeBlobRecord<'a>),
    /// A record whose type nibble matched none of the known `*_RECORD_TYPE`s.
    Unknown { raw_type: u8 },
}
// Derive expansion: per-variant Debug formatting.
#[automatically_derived]
impl<'a> ::core::fmt::Debug for RawTraceRecord<'a> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match self {
            RawTraceRecord::Metadata(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Metadata",
                    &__self_0,
                )
            }
            RawTraceRecord::Init(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Init", &__self_0)
            }
            RawTraceRecord::String(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "String", &__self_0)
            }
            RawTraceRecord::Thread(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Thread", &__self_0)
            }
            RawTraceRecord::Event(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Event", &__self_0)
            }
            RawTraceRecord::Blob(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Blob", &__self_0)
            }
            RawTraceRecord::UserspaceObj(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "UserspaceObj",
                    &__self_0,
                )
            }
            RawTraceRecord::KernelObj(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "KernelObj",
                    &__self_0,
                )
            }
            RawTraceRecord::Scheduling(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "Scheduling",
                    &__self_0,
                )
            }
            RawTraceRecord::Log(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Log", &__self_0)
            }
            RawTraceRecord::LargeBlob(__self_0) => {
                ::core::fmt::Formatter::debug_tuple_field1_finish(
                    f,
                    "LargeBlob",
                    &__self_0,
                )
            }
            RawTraceRecord::Unknown { raw_type: __self_0 } => {
                ::core::fmt::Formatter::debug_struct_field1_finish(
                    f,
                    "Unknown",
                    "raw_type",
                    &__self_0,
                )
            }
        }
    }
}
// Derive expansion: compare discriminants first, then matching payloads.
#[automatically_derived]
impl<'a> ::core::marker::StructuralPartialEq for RawTraceRecord<'a> {}
#[automatically_derived]
impl<'a> ::core::cmp::PartialEq for RawTraceRecord<'a> {
    #[inline]
    fn eq(&self, other: &RawTraceRecord<'a>) -> bool {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
        __self_tag == __arg1_tag
            && match (self, other) {
                (
                    RawTraceRecord::Metadata(__self_0),
                    RawTraceRecord::Metadata(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (RawTraceRecord::Init(__self_0), RawTraceRecord::Init(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (RawTraceRecord::String(__self_0), RawTraceRecord::String(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (RawTraceRecord::Thread(__self_0), RawTraceRecord::Thread(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (RawTraceRecord::Event(__self_0), RawTraceRecord::Event(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (RawTraceRecord::Blob(__self_0), RawTraceRecord::Blob(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (
                    RawTraceRecord::UserspaceObj(__self_0),
                    RawTraceRecord::UserspaceObj(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    RawTraceRecord::KernelObj(__self_0),
                    RawTraceRecord::KernelObj(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    RawTraceRecord::Scheduling(__self_0),
                    RawTraceRecord::Scheduling(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (RawTraceRecord::Log(__self_0), RawTraceRecord::Log(__arg1_0)) => {
                    *__self_0 == *__arg1_0
                }
                (
                    RawTraceRecord::LargeBlob(__self_0),
                    RawTraceRecord::LargeBlob(__arg1_0),
                ) => *__self_0 == *__arg1_0,
                (
                    RawTraceRecord::Unknown { raw_type: __self_0 },
                    RawTraceRecord::Unknown { raw_type: __arg1_0 },
                ) => *__self_0 == *__arg1_0,
                // SAFETY: discriminants were already checked equal above, so a
                // mismatched-variant pair can never reach this arm.
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
    }
}
/// The common 64-bit header word shared by all record types; only the
/// type nibble and size field are meaningful at this level.
pub struct BaseTraceHeader(u64);
// Derive expansions: Copy newtype; Eq/PartialEq compare the raw bits.
#[automatically_derived]
impl ::core::clone::Clone for BaseTraceHeader {
    #[inline]
    fn clone(&self) -> BaseTraceHeader {
        let _: ::core::clone::AssertParamIsClone<u64>;
        *self
    }
}
#[automatically_derived]
impl ::core::marker::Copy for BaseTraceHeader {}
#[automatically_derived]
impl ::core::marker::StructuralEq for BaseTraceHeader {}
#[automatically_derived]
impl ::core::cmp::Eq for BaseTraceHeader {
    #[inline]
    #[doc(hidden)]
    #[coverage(off)]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u64>;
    }
}
#[automatically_derived]
impl ::core::marker::StructuralPartialEq for BaseTraceHeader {}
#[automatically_derived]
impl ::core::cmp::PartialEq for BaseTraceHeader {
    #[inline]
    fn eq(&self, other: &BaseTraceHeader) -> bool {
        self.0 == other.0
    }
}
// Bitfield plumbing: delegate bit-range reads/writes to the wrapped u64.
impl<T> ::bitfield::BitRange<T> for BaseTraceHeader
where
    u64: ::bitfield::BitRange<T>,
{
    fn bit_range(&self, msb: usize, lsb: usize) -> T {
        self.0.bit_range(msb, lsb)
    }
}
impl<T> ::bitfield::BitRangeMut<T> for BaseTraceHeader
where
    u64: ::bitfield::BitRangeMut<T>,
{
    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
        self.0.set_bit_range(msb, lsb, value);
    }
}
/// Debug output shows the raw header word plus each decoded bitfield.
impl std::fmt::Debug for BaseTraceHeader {
    fn fmt(&self, f: &mut ::bitfield::fmt::Formatter) -> ::bitfield::fmt::Result {
        // Same fields in the same order as before, built with a chained
        // `debug_struct` expression instead of a mutable local.
        f.debug_struct("BaseTraceHeader")
            .field(".0", &self.0)
            .field("raw_type", &self.raw_type())
            .field("size_words", &self.size_words())
            .finish()
    }
}
/// Accessors for the common header layout:
/// bits 3..0 = record type, bits 15..4 = size in words.
impl BaseTraceHeader {
    /// Record-type discriminant stored in the low four bits.
    pub fn raw_type(&self) -> u8 {
        use ::bitfield::BitRange;
        let raw_value: u8 = self.bit_range(3, 0);
        ::bitfield::Into::into(raw_value)
    }
    /// Total record size in 8-byte words, including the header word.
    pub fn size_words(&self) -> u16 {
        use ::bitfield::BitRange;
        let raw_value: u16 = self.bit_range(15, 4);
        ::bitfield::Into::into(raw_value)
    }
    pub fn set_raw_type(&mut self, value: u8) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(3, 0, ::bitfield::Into::<u8>::into(value));
    }
    pub fn set_size_words(&mut self, value: u16) {
        use ::bitfield::BitRangeMut;
        self.set_bit_range(15, 4, ::bitfield::Into::<u16>::into(value));
    }
    /// An all-zero header (no type pre-set, unlike typed headers).
    #[allow(unused, unused_mut)]
    pub(crate) fn empty() -> Self {
        let mut header = Self(0);
        header
    }
    /// Wraps `bits`; the base header accepts any type nibble.
    fn new(bits: u64) -> Result<Self, crate::ParseError> {
        let header = Self(bits);
        // Generated extension point: a no-op validation closure here.
        let res: Result<(), crate::ParseError> = (|_h| Ok(()))(&header);
        res?;
        Ok(header)
    }
    /// Parses one little-endian u64 header word from `buf`.
    #[allow(unused)]
    fn parse(buf: &[u8]) -> crate::ParseResult<'_, Self> {
        nom::combinator::map_res(nom::number::streaming::le_u64, |h| Self::new(h))(buf)
    }
    /// Splits off this record's payload (size minus the one header word),
    /// returning `Incomplete` with the missing byte count if `buf` is short.
    #[allow(unused)]
    fn take_payload<'a>(&self, buf: &'a [u8]) -> crate::ParseResult<'a, &'a [u8]> {
        if self.size_words() == 0 {
            // A zero-sized record cannot even contain its own header.
            return Err(nom::Err::Failure(crate::ParseError::InvalidSize));
        }
        let size_bytes_without_header = (self.size_words() as usize - 1) * 8;
        if size_bytes_without_header > buf.len() {
            let needed = size_bytes_without_header - buf.len();
            return Err(nom::Err::Incomplete(nom::Needed::Size(needed)));
        }
        let (payload, rem) = buf.split_at(size_bytes_without_header);
        Ok((rem, payload))
    }
}
/// Generic header interface used when (re-)serializing records.
impl crate::header::TraceHeader for BaseTraceHeader {
    fn set_size_words(&mut self, n: u16) {
        // `try_into` on u16 -> u16 is infallible; kept as generated.
        self.set_size_words(n.try_into().unwrap());
    }
    fn to_le_bytes(&self) -> [u8; 8] {
        self.0.to_le_bytes()
    }
}
/// A parsed value paired with the exact input bytes it was parsed from,
/// so callers can re-emit or inspect the original encoding.
pub(crate) struct ParsedWithOriginalBytes<'a, T> {
    pub parsed: T,
    pub bytes: &'a [u8],
}
// Derive expansions: Debug and field-wise PartialEq.
#[automatically_derived]
impl<'a, T: ::core::fmt::Debug> ::core::fmt::Debug for ParsedWithOriginalBytes<'a, T> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field2_finish(
            f,
            "ParsedWithOriginalBytes",
            "parsed",
            &self.parsed,
            "bytes",
            &&self.bytes,
        )
    }
}
#[automatically_derived]
impl<'a, T> ::core::marker::StructuralPartialEq for ParsedWithOriginalBytes<'a, T> {}
#[automatically_derived]
impl<'a, T: ::core::cmp::PartialEq> ::core::cmp::PartialEq
for ParsedWithOriginalBytes<'a, T> {
    #[inline]
    fn eq(&self, other: &ParsedWithOriginalBytes<'a, T>) -> bool {
        self.parsed == other.parsed && self.bytes == other.bytes
    }
}
// Record-type discriminants matched against the low four header bits
// (`raw_type`) when dispatching in `RawTraceRecord::parse`. Values 10..=14
// are unassigned here and fall through to `RawTraceRecord::Unknown`.
const METADATA_RECORD_TYPE: u8 = 0;
const INIT_RECORD_TYPE: u8 = 1;
const STRING_RECORD_TYPE: u8 = 2;
const THREAD_RECORD_TYPE: u8 = 3;
const EVENT_RECORD_TYPE: u8 = 4;
const BLOB_RECORD_TYPE: u8 = 5;
const USERSPACE_OBJ_RECORD_TYPE: u8 = 6;
const KERNEL_OBJ_RECORD_TYPE: u8 = 7;
const SCHEDULING_RECORD_TYPE: u8 = 8;
const LOG_RECORD_TYPE: u8 = 9;
const LARGE_RECORD_TYPE: u8 = 15;
impl<'a> RawTraceRecord<'a> {
    /// Parses a single record from the front of `buf`.
    ///
    /// Reads the base header to learn the record's type and total size,
    /// dispatches to the type-specific parser over exactly that many bytes,
    /// and returns the parsed record together with its original bytes plus
    /// the remainder of the buffer.
    fn parse(buf: &'a [u8]) -> ParseResult<'a, ParsedWithOriginalBytes<'a, Self>> {
        use nom::combinator::map;
        let base_header = BaseTraceHeader::parse(buf)?.1;
        let size_bytes = base_header.size_words() as usize * 8;
        if size_bytes == 0 {
            // A record must at least contain its own header word.
            return Err(nom::Err::Failure(ParseError::InvalidSize));
        }
        if size_bytes > buf.len() {
            return Err(nom::Err::Incomplete(nom::Needed::Size(size_bytes - buf.len())));
        }
        // `buf` is narrowed to exactly this record; `rem` is everything after it.
        let (buf, rem) = buf.split_at(size_bytes);
        let (_, parsed) = match base_header.raw_type() {
            METADATA_RECORD_TYPE => {
                map(MetadataRecord::parse, |m| Self::Metadata(m))(buf)
            }
            INIT_RECORD_TYPE => map(InitRecord::parse, |i| Self::Init(i))(buf),
            STRING_RECORD_TYPE => map(StringRecord::parse, |s| Self::String(s))(buf),
            THREAD_RECORD_TYPE => map(ThreadRecord::parse, |t| Self::Thread(t))(buf),
            EVENT_RECORD_TYPE => map(RawEventRecord::parse, |e| Self::Event(e))(buf),
            BLOB_RECORD_TYPE => map(RawBlobRecord::parse, |b| Self::Blob(b))(buf),
            USERSPACE_OBJ_RECORD_TYPE => {
                map(RawUserspaceObjRecord::parse, |u| Self::UserspaceObj(u))(buf)
            }
            KERNEL_OBJ_RECORD_TYPE => {
                map(RawKernelObjRecord::parse, |k| Self::KernelObj(k))(buf)
            }
            SCHEDULING_RECORD_TYPE => {
                map(RawSchedulingRecord::parse, |s| Self::Scheduling(s))(buf)
            }
            LOG_RECORD_TYPE => map(RawLogRecord::parse, |l| Self::Log(l))(buf),
            LARGE_RECORD_TYPE => {
                map(RawLargeBlobRecord::parse, |l| Self::LargeBlob(l))(buf)
            }
            // Unrecognized type nibble: the record is skipped wholesale but its
            // size field is still honored, keeping the stream in sync.
            raw_type => Ok((&[][..], Self::Unknown { raw_type })),
        }?;
        Ok((
            rem,
            ParsedWithOriginalBytes {
                parsed,
                bytes: buf,
            },
        ))
    }
    /// Whether this is the FXT magic-number metadata record that begins a trace.
    fn is_magic_number(&self) -> bool {
        match self {
            Self::Metadata(
                MetadataRecord::TraceInfo(TraceInfoMetadataRecord::MagicNumber),
            ) => true,
            _ => false,
        }
    }
}
/// Take the first `unpadded_len` bytes from a buffer, returning a suffix beginning at the next
/// word-aligned region and discarding padding bytes.
fn take_n_padded<'a>(unpadded_len: usize, buf: &'a [u8]) -> ParseResult<'a, &'a [u8]> {
    // Round the requested length up to the next 8-byte word boundary.
    let padded_len = unpadded_len + word_padding(unpadded_len);
    if padded_len > buf.len() {
        // Streaming parse: report exactly how many more bytes are needed.
        return Err(nom::Err::Incomplete(nom::Needed::Size(padded_len - buf.len())));
    }
    let (with_padding, rem) = buf.split_at(padded_len);
    // Drop the trailing padding bytes from the returned payload.
    let (unpadded, _padding) = with_padding.split_at(unpadded_len);
    Ok((rem, unpadded))
}
/// Number of padding bytes needed to round `unpadded_len` up to the next
/// 8-byte (one trace word) boundary; zero when already aligned.
fn word_padding(unpadded_len: usize) -> usize {
    // `unpadded_len % 8` is always in 0..8, so the original match's `0 | 8`
    // arm contained an unreachable `8` pattern; one expression covers all
    // cases (outer `% 8` maps the aligned case's `8 - 0` back to 0).
    (8 - unpadded_len % 8) % 8
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment