/* Gist by @nickdesaulniers, created January 19, 2019 00:27. */
/* Preprocessed output of arch/x86/mm/fault.c (clang). The gist has been truncated, so only the beginning of the output appears below. */
# 1 "arch/x86/mm/fault.c"
# 1 "<built-in>" 1
# 1 "<built-in>" 3
# 346 "<built-in>" 3
# 1 "<command line>" 1
# 1 "<built-in>" 2
# 1 "././include/linux/kconfig.h" 1
# 1 "./include/generated/autoconf.h" 1
# 6 "././include/linux/kconfig.h" 2
# 2 "<built-in>" 2
# 1 "././include/linux/compiler_types.h" 1
# 59 "././include/linux/compiler_types.h"
# 1 "./include/linux/compiler_attributes.h" 1
# 60 "././include/linux/compiler_types.h" 2
# 1 "./include/linux/compiler-clang.h" 1
# 64 "././include/linux/compiler_types.h" 2
# 85 "././include/linux/compiler_types.h"
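/*
 * From compiler_types.h: bookkeeping structs used by the ftrace branch
 * profiler to count correct/incorrect likely()/unlikely() predictions
 * (or hit/miss when all branches are profiled).
 */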
struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
union {
struct {
unsigned long correct;
unsigned long incorrect;
};
struct {
unsigned long miss;
unsigned long hit;
};
unsigned long miss_hit[2];
};
};
struct ftrace_likely_data {
struct ftrace_branch_data data;
unsigned long constant;
};
# 3 "<built-in>" 2
# 1 "arch/x86/mm/fault.c" 2
# 1 "./include/linux/sched.h" 1
# 10 "./include/linux/sched.h"
# 1 "./include/uapi/linux/sched.h" 1
# 11 "./include/linux/sched.h" 2
# 1 "./arch/x86/include/asm/current.h" 1
# 1 "./include/linux/compiler.h" 1
# 172 "./include/linux/compiler.h"
# 1 "./include/uapi/linux/types.h" 1
# 1 "./arch/x86/include/uapi/asm/types.h" 1
# 1 "./include/uapi/asm-generic/types.h" 1
# 1 "./include/asm-generic/int-ll64.h" 1
# 11 "./include/asm-generic/int-ll64.h"
# 1 "./include/uapi/asm-generic/int-ll64.h" 1
# 12 "./include/uapi/asm-generic/int-ll64.h"
# 1 "./arch/x86/include/uapi/asm/bitsperlong.h" 1
# 11 "./arch/x86/include/uapi/asm/bitsperlong.h"
# 1 "./include/asm-generic/bitsperlong.h" 1
# 1 "./include/uapi/asm-generic/bitsperlong.h" 1
# 6 "./include/asm-generic/bitsperlong.h" 2
# 12 "./arch/x86/include/uapi/asm/bitsperlong.h" 2
# 13 "./include/uapi/asm-generic/int-ll64.h" 2
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;
# 12 "./include/asm-generic/int-ll64.h" 2
typedef __s8 s8;
typedef __u8 u8;
typedef __s16 s16;
typedef __u16 u16;
typedef __s32 s32;
typedef __u32 u32;
typedef __s64 s64;
typedef __u64 u64;
# 8 "./include/uapi/asm-generic/types.h" 2
# 6 "./arch/x86/include/uapi/asm/types.h" 2
# 6 "./include/uapi/linux/types.h" 2
# 1 "./include/uapi/linux/posix_types.h" 1
# 1 "./include/linux/stddef.h" 1
# 1 "./include/uapi/linux/stddef.h" 1
# 6 "./include/linux/stddef.h" 2
enum {
false = 0,
true = 1
};
# 6 "./include/uapi/linux/posix_types.h" 2
# 25 "./include/uapi/linux/posix_types.h"
typedef struct {
unsigned long fds_bits[1024 / (8 * sizeof(long))];
} __kernel_fd_set;
typedef void (*__kernel_sighandler_t)(int);
typedef int __kernel_key_t;
typedef int __kernel_mqd_t;
# 1 "./arch/x86/include/asm/posix_types.h" 1
# 1 "./arch/x86/include/uapi/asm/posix_types_64.h" 1
# 11 "./arch/x86/include/uapi/asm/posix_types_64.h"
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned long __kernel_old_dev_t;
# 1 "./include/uapi/asm-generic/posix_types.h" 1
# 15 "./include/uapi/asm-generic/posix_types.h"
typedef long __kernel_long_t;
typedef unsigned long __kernel_ulong_t;
typedef __kernel_ulong_t __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
typedef int __kernel_pid_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef __kernel_long_t __kernel_suseconds_t;
typedef int __kernel_daddr_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
# 72 "./include/uapi/asm-generic/posix_types.h"
typedef __kernel_ulong_t __kernel_size_t;
typedef __kernel_long_t __kernel_ssize_t;
typedef __kernel_long_t __kernel_ptrdiff_t;
typedef struct {
int val[2];
} __kernel_fsid_t;
typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
typedef __kernel_long_t __kernel_time_t;
typedef long long __kernel_time64_t;
typedef __kernel_long_t __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
# 19 "./arch/x86/include/uapi/asm/posix_types_64.h" 2
# 6 "./arch/x86/include/asm/posix_types.h" 2
# 37 "./include/uapi/linux/posix_types.h" 2
# 15 "./include/uapi/linux/types.h" 2
# 29 "./include/uapi/linux/types.h"
typedef __u16 __le16;
typedef __u16 __be16;
typedef __u32 __le32;
typedef __u32 __be32;
typedef __u64 __le64;
typedef __u64 __be64;
typedef __u16 __sum16;
typedef __u32 __wsum;
# 52 "./include/uapi/linux/types.h"
typedef unsigned __poll_t;
# 173 "./include/linux/compiler.h" 2
# 188 "./include/linux/compiler.h"
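/*
 * Helpers behind READ_ONCE()/WRITE_ONCE(): 1-, 2-, 4- and 8-byte objects
 * are accessed through a single volatile load or store (no tearing);
 * other sizes fall back to memcpy bracketed by compiler barriers.
 */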
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) __attribute__((__always_inline__))
void __read_once_size(const volatile void *p, void *res, int size)
{
({ switch (size) { case 1: *(__u8 *)res = *(volatile __u8 *)p; break; case 2: *(__u16 *)res = *(volatile __u16 *)p; break; case 4: *(__u32 *)res = *(volatile __u32 *)p; break; case 8: *(__u64 *)res = *(volatile __u64 *)p; break; default: __asm__ __volatile__("" : : : "memory"); __builtin_memcpy((void *)res, (const void *)p, size); __asm__ __volatile__("" : : : "memory"); } });
}
# 206 "./include/linux/compiler.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) __attribute__((__always_inline__))
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
({ switch (size) { case 1: *(__u8 *)res = *(volatile __u8 *)p; break; case 2: *(__u16 *)res = *(volatile __u16 *)p; break; case 4: *(__u32 *)res = *(volatile __u32 *)p; break; case 8: *(__u64 *)res = *(volatile __u64 *)p; break; default: __asm__ __volatile__("" : : : "memory"); __builtin_memcpy((void *)res, (const void *)p, size); __asm__ __volatile__("" : : : "memory"); } });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) __attribute__((__always_inline__)) void __write_once_size(volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
default:
__asm__ __volatile__("" : : : "memory");
__builtin_memcpy((void *)p, (const void *)res, size);
__asm__ __volatile__("" : : : "memory");
}
}
# 248 "./include/linux/compiler.h"
# 1 "./arch/x86/include/asm/barrier.h" 1
# 1 "./arch/x86/include/asm/alternative.h" 1
# 1 "./include/linux/types.h" 1
# 13 "./include/linux/types.h"
typedef u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef unsigned short umode_t;
typedef u32 nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
typedef __kernel_key_t key_t;
typedef __kernel_suseconds_t suseconds_t;
typedef __kernel_timer_t timer_t;
typedef __kernel_clockid_t clockid_t;
typedef __kernel_mqd_t mqd_t;
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
typedef __kernel_loff_t loff_t;
# 55 "./include/linux/types.h"
typedef __kernel_size_t size_t;
typedef __kernel_ssize_t ssize_t;
typedef __kernel_ptrdiff_t ptrdiff_t;
typedef __kernel_time_t time_t;
typedef __kernel_clock_t clock_t;
typedef __kernel_caddr_t caddr_t;
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;
typedef u8 u_int8_t;
typedef s8 int8_t;
typedef u16 u_int16_t;
typedef s16 int16_t;
typedef u32 u_int32_t;
typedef s32 int32_t;
typedef u8 uint8_t;
typedef u16 uint16_t;
typedef u32 uint32_t;
typedef u64 uint64_t;
typedef u64 u_int64_t;
typedef s64 int64_t;
# 134 "./include/linux/types.h"
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
# 153 "./include/linux/types.h"
typedef u64 dma_addr_t;
typedef unsigned gfp_t;
typedef unsigned slab_flags_t;
typedef unsigned fmode_t;
typedef u64 phys_addr_t;
typedef phys_addr_t resource_size_t;
typedef unsigned long irq_hw_number_t;
typedef struct {
int counter;
} atomic_t;
typedef struct {
long counter;
} atomic64_t;
struct list_head {
struct list_head *next, *prev;
};
struct hlist_head {
struct hlist_node *first;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
# 224 "./include/linux/types.h"
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
} __attribute__((aligned(sizeof(void *))));
typedef void (*rcu_callback_t)(struct callback_head *head);
typedef void (*call_rcu_func_t)(struct callback_head *head, rcu_callback_t func);
# 8 "./arch/x86/include/asm/alternative.h" 2
# 1 "./include/linux/stringify.h" 1
# 10 "./arch/x86/include/asm/alternative.h" 2
# 1 "./arch/x86/include/asm/asm.h" 1
# 210 "./arch/x86/include/asm/asm.h"
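/* Global register variable: current_stack_pointer is pinned to %rsp. */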
register unsigned long current_stack_pointer asm("rsp");
# 11 "./arch/x86/include/asm/alternative.h" 2
# 48 "./arch/x86/include/asm/alternative.h"
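/*
 * One entry in the .altinstructions table: where the original instruction
 * lives, where its replacement lives, and which CPU feature bit decides
 * whether the replacement gets patched in at boot.
 */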
struct alt_instr {
s32 instr_offset;
s32 repl_offset;
u16 cpuid;
u8 instrlen;
u8 replacementlen;
u8 padlen;
} __attribute__((__packed__));
extern int alternatives_patched;
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
struct module;
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_enable_smp(void);
extern int alternatives_text_reserved(void *start, void *end);
extern bool skip_smp_alternatives;
# 6 "./arch/x86/include/asm/barrier.h" 2
# 1 "./arch/x86/include/asm/nops.h" 1
# 143 "./arch/x86/include/asm/nops.h"
extern const unsigned char * const *ideal_nops;
extern void arch_init_ideal_nops(void);
# 7 "./arch/x86/include/asm/barrier.h" 2
# 36 "./arch/x86/include/asm/barrier.h"
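/*
 * Spectre v1 helper: returns ~0UL when index < size and 0 otherwise,
 * computed with cmp/sbb so no conditional branch is emitted for the CPU
 * to mispredict.
 */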
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size)
{
unsigned long mask;
asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"g"(size),"r" (index)
:"cc");
return mask;
}
# 86 "./arch/x86/include/asm/barrier.h"
# 1 "./include/asm-generic/barrier.h" 1
# 20 "./include/asm-generic/barrier.h"
# 1 "./include/linux/compiler.h" 1
# 21 "./include/asm-generic/barrier.h" 2
# 87 "./arch/x86/include/asm/barrier.h" 2
# 249 "./include/linux/compiler.h" 2
# 1 "./include/linux/kasan-checks.h" 1
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kasan_check_read(const volatile void *p, unsigned int size)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kasan_check_write(const volatile void *p, unsigned int size)
{ }
# 250 "./include/linux/compiler.h" 2
# 269 "./include/linux/compiler.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) __attribute__((__always_inline__))
unsigned long read_word_at_a_time(const void *addr)
{
kasan_check_read(addr, 1);
return *(unsigned long *)addr;
}
# 300 "./include/linux/compiler.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void *offset_to_ptr(const int *off)
{
return (void *)((unsigned long)off + *off);
}
# 6 "./arch/x86/include/asm/current.h" 2
# 1 "./arch/x86/include/asm/percpu.h" 1
# 45 "./arch/x86/include/asm/percpu.h"
# 1 "./include/linux/kernel.h" 1
# 1 "/android1/llvm/build/lib/clang/9.0.0/include/stdarg.h" 1 3
# 30 "/android1/llvm/build/lib/clang/9.0.0/include/stdarg.h" 3
typedef __builtin_va_list va_list;
# 48 "/android1/llvm/build/lib/clang/9.0.0/include/stdarg.h" 3
typedef __builtin_va_list __gnuc_va_list;
# 7 "./include/linux/kernel.h" 2
# 1 "./include/linux/linkage.h" 1
# 1 "./include/linux/export.h" 1
# 61 "./include/linux/export.h"
struct kernel_symbol {
int value_offset;
int name_offset;
};
# 8 "./include/linux/linkage.h" 2
# 1 "./arch/x86/include/asm/linkage.h" 1
# 9 "./include/linux/linkage.h" 2
# 8 "./include/linux/kernel.h" 2
# 1 "./include/linux/bitops.h" 1
# 1 "./include/linux/bits.h" 1
# 6 "./include/linux/bitops.h" 2
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
# 1 "./arch/x86/include/asm/bitops.h" 1
# 18 "./arch/x86/include/asm/bitops.h"
# 1 "./arch/x86/include/asm/rmwcc.h" 1
# 19 "./arch/x86/include/asm/bitops.h" 2
# 72 "./arch/x86/include/asm/bitops.h"
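/*
 * Atomic bit operations.  When the bit number is a compile-time constant
 * the byte containing it is targeted with a locked orb/andb/xorb;
 * otherwise a locked btsq/btrq/btcq is used.  The .smp_locks section
 * records the location of each lock prefix so it can be patched out on
 * uniprocessor kernels.
 */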
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void
set_bit(long nr, volatile unsigned long *addr)
{
if ((__builtin_constant_p(nr))) {
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "orb %1,%0"
: "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
: "iq" ((u8)(1 << ((nr) & 7)))
: "memory");
} else {
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btsq" " " " %1,%0"
: "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
}
}
# 95 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile(" " "btsq" " " " %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
}
# 110 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void
clear_bit(long nr, volatile unsigned long *addr)
{
if ((__builtin_constant_p(nr))) {
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "andb %1,%0"
: "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
: "iq" ((u8)~(1 << ((nr) & 7))));
} else {
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btrq" " " " %1,%0"
: "+m" (*(volatile long *) (addr))
: "Ir" (nr));
}
}
# 132 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
__asm__ __volatile__("" : : : "memory");
clear_bit(nr, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile(" " "btrq" " " " %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
bool negative;
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "andb %2,%1"
"\n\tset" "s" " %[_cc_" "s" "]\n"
: [_cc_s] "=qm" (negative), "+m" (*(volatile long *) (addr))
: "ir" ((char) ~(1 << nr)) : "memory");
return negative;
}
# 168 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
__asm__ __volatile__("" : : : "memory");
__clear_bit(nr, addr);
}
# 183 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile(" " "btcq" " " " %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr));
}
# 197 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void change_bit(long nr, volatile unsigned long *addr)
{
if ((__builtin_constant_p(nr))) {
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xorb %1,%0"
: "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
: "iq" ((u8)(1 << ((nr) & 7))));
} else {
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btcq" " " " %1,%0"
: "+m" (*(volatile long *) (addr))
: "Ir" (nr));
}
}
# 218 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btsq" " " " %[val], " "%[var]" "; j" "c" " %l[cc_label]" : : [var] "m" (*addr), [val] "Ir" (nr) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 230 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
# 245 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
bool oldbit;
asm(" " "btsq" " " " %2,%1"
"\n\tset" "c" " %[_cc_" "c" "]\n"
: [_cc_c] "=qm" (oldbit), "+m" (*(volatile long *) (addr))
: "Ir" (nr));
return oldbit;
}
# 264 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btrq" " " " %[val], " "%[var]" "; j" "c" " %l[cc_label]" : : [var] "m" (*addr), [val] "Ir" (nr) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 285 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
bool oldbit;
asm volatile(" " "btrq" " " " %2,%1"
"\n\tset" "c" " %[_cc_" "c" "]\n"
: [_cc_c] "=qm" (oldbit), "+m" (*(volatile long *) (addr))
: "Ir" (nr));
return oldbit;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
bool oldbit;
asm volatile(" " "btcq" " " " %2,%1"
"\n\tset" "c" " %[_cc_" "c" "]\n"
: [_cc_c] "=qm" (oldbit), "+m" (*(volatile long *) (addr))
: "Ir" (nr) : "memory");
return oldbit;
}
# 317 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btcq" " " " %[val], " "%[var]" "; j" "c" " %l[cc_label]" : : [var] "m" (*addr), [val] "Ir" (nr) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & (64 -1))) &
(addr[nr >> 6])) != 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
bool oldbit;
asm volatile(" " "btq" " " " %2,%1"
"\n\tset" "c" " %[_cc_" "c" "]\n"
: [_cc_c] "=qm" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
}
# 360 "./arch/x86/include/asm/bitops.h"
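/* Bit-scan helpers: __ffs/ffz map onto bsf, __fls/fls/fls64 onto bsr. */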
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __ffs(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long ffz(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __fls(unsigned long word)
{
asm("bsr %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
# 410 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int ffs(int x)
{
int r;
# 424 "./arch/x86/include/asm/bitops.h"
asm("bsfl %1,%0"
: "=r" (r)
: "rm" (x), "0" (-1));
# 437 "./arch/x86/include/asm/bitops.h"
return r + 1;
}
# 451 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int fls(unsigned int x)
{
int r;
# 465 "./arch/x86/include/asm/bitops.h"
asm("bsrl %1,%0"
: "=r" (r)
: "rm" (x), "0" (-1));
# 478 "./arch/x86/include/asm/bitops.h"
return r + 1;
}
# 493 "./arch/x86/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int fls64(__u64 x)
{
int bitpos = -1;
asm("bsrq %1,%q0"
: "+r" (bitpos)
: "rm" (x));
return bitpos + 1;
}
# 1 "./include/asm-generic/bitops/find.h" 1
# 15 "./include/asm-generic/bitops/find.h"
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
# 30 "./include/asm-generic/bitops/find.h"
extern unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset);
# 45 "./include/asm-generic/bitops/find.h"
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
long size, unsigned long offset);
# 59 "./include/asm-generic/bitops/find.h"
extern unsigned long find_first_bit(const unsigned long *addr,
unsigned long size);
# 70 "./include/asm-generic/bitops/find.h"
extern unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size);
# 511 "./arch/x86/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/sched.h" 1
# 13 "./include/asm-generic/bitops/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sched_find_first_bit(const unsigned long *b)
{
if (b[0])
return __ffs(b[0]);
return __ffs(b[1]) + 64;
# 30 "./include/asm-generic/bitops/sched.h"
}
# 513 "./arch/x86/include/asm/bitops.h" 2
# 1 "./arch/x86/include/asm/arch_hweight.h" 1
# 1 "./arch/x86/include/asm/cpufeatures.h" 1
# 1 "./arch/x86/include/asm/required-features.h" 1
# 7 "./arch/x86/include/asm/cpufeatures.h" 2
# 1 "./arch/x86/include/asm/disabled-features.h" 1
# 11 "./arch/x86/include/asm/cpufeatures.h" 2
# 6 "./arch/x86/include/asm/arch_hweight.h" 2
# 17 "./arch/x86/include/asm/arch_hweight.h"
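/*
 * Population count via the alternatives mechanism: the call to
 * __sw_hweight32/__sw_hweight64 is the default and is patched to a
 * popcnt instruction on CPUs that advertise the POPCNT feature.
 */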
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
asm ("661:\n\t" "call __sw_hweight32" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 4*32+23)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" "popcntl %1, %0" "\n" "665""1" ":\n\t" ".popsection\n"
: "=""a" (res)
: "D" (w));
return res;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __arch_hweight16(unsigned int w)
{
return __arch_hweight32(w & 0xffff);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __arch_hweight8(unsigned int w)
{
return __arch_hweight32(w & 0xff);
}
# 45 "./arch/x86/include/asm/arch_hweight.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
asm ("661:\n\t" "call __sw_hweight64" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 4*32+23)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" "popcntq %1, %0" "\n" "665""1" ":\n\t" ".popsection\n"
: "=""a" (res)
: "D" (w));
return res;
}
# 515 "./arch/x86/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/const_hweight.h" 1
# 517 "./arch/x86/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/le.h" 1
# 1 "./arch/x86/include/uapi/asm/byteorder.h" 1
# 1 "./include/linux/byteorder/little_endian.h" 1
# 1 "./include/uapi/linux/byteorder/little_endian.h" 1
# 13 "./include/uapi/linux/byteorder/little_endian.h"
# 1 "./include/linux/swab.h" 1
# 1 "./include/uapi/linux/swab.h" 1
# 1 "./arch/x86/include/uapi/asm/swab.h" 1
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u32 __arch_swab32(__u32 val)
{
asm("bswapl %0" : "=r" (val) : "0" (val));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u64 __arch_swab64(__u64 val)
{
# 31 "./arch/x86/include/uapi/asm/swab.h"
asm("bswapq %0" : "=r" (val) : "0" (val));
return val;
}
# 8 "./include/uapi/linux/swab.h" 2
# 47 "./include/uapi/linux/swab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u16 __fswab16(__u16 val)
{
return ((__u16)( (((__u16)(val) & (__u16)0x00ffU) << 8) | (((__u16)(val) & (__u16)0xff00U) >> 8)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u32 __fswab32(__u32 val)
{
return __arch_swab32(val);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u64 __fswab64(__u64 val)
{
return __arch_swab64(val);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u32 __fswahw32(__u32 val)
{
return ((__u32)( (((__u32)(val) & (__u32)0x0000ffffUL) << 16) | (((__u32)(val) & (__u32)0xffff0000UL) >> 16)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u32 __fswahb32(__u32 val)
{
return ((__u32)( (((__u32)(val) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(val) & (__u32)0xff00ff00UL) >> 8)));
}
# 161 "./include/uapi/linux/swab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u16 __swab16p(const __u16 *p)
{
return (__builtin_constant_p((__u16)(*p)) ? ((__u16)( (((__u16)(*p) & (__u16)0x00ffU) << 8) | (((__u16)(*p) & (__u16)0xff00U) >> 8))) : __fswab16(*p));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u32 __swab32p(const __u32 *p)
{
return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x000000ffUL) << 24) | (((__u32)(*p) & (__u32)0x0000ff00UL) << 8) | (((__u32)(*p) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(*p) & (__u32)0xff000000UL) >> 24))) : __fswab32(*p));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u64 __swab64p(const __u64 *p)
{
return (__builtin_constant_p((__u64)(*p)) ? ((__u64)( (((__u64)(*p) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(*p) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(*p) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(*p) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(*p) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(*p) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(*p) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(*p) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(*p));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 __swahw32p(const __u32 *p)
{
return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 __swahb32p(const __u32 *p)
{
return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __swab16s(__u16 *p)
{
*p = __swab16p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __swab32s(__u32 *p)
{
*p = __swab32p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __swab64s(__u64 *p)
{
*p = __swab64p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __swahw32s(__u32 *p)
{
*p = __swahw32p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __swahb32s(__u32 *p)
{
*p = __swahb32p(p);
}
# 6 "./include/linux/swab.h" 2
# 14 "./include/uapi/linux/byteorder/little_endian.h" 2
# 44 "./include/uapi/linux/byteorder/little_endian.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __le64 __cpu_to_le64p(const __u64 *p)
{
return ( __le64)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u64 __le64_to_cpup(const __le64 *p)
{
return ( __u64)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __le32 __cpu_to_le32p(const __u32 *p)
{
return ( __le32)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u32 __le32_to_cpup(const __le32 *p)
{
return ( __u32)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __le16 __cpu_to_le16p(const __u16 *p)
{
return ( __le16)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u16 __le16_to_cpup(const __le16 *p)
{
return ( __u16)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __be64 __cpu_to_be64p(const __u64 *p)
{
return ( __be64)__swab64p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u64 __be64_to_cpup(const __be64 *p)
{
return __swab64p((__u64 *)p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __be32 __cpu_to_be32p(const __u32 *p)
{
return ( __be32)__swab32p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u32 __be32_to_cpup(const __be32 *p)
{
return __swab32p((__u32 *)p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __be16 __cpu_to_be16p(const __u16 *p)
{
return ( __be16)__swab16p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u16 __be16_to_cpup(const __be16 *p)
{
return __swab16p((__u16 *)p);
}
# 6 "./include/linux/byteorder/little_endian.h" 2
# 1 "./include/linux/byteorder/generic.h" 1
# 144 "./include/linux/byteorder/generic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void le16_add_cpu(__le16 *var, u16 val)
{
*var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void le32_add_cpu(__le32 *var, u32 val)
{
*var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void le64_add_cpu(__le64 *var, u64 val)
{
*var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void le32_to_cpu_array(u32 *buf, unsigned int words)
{
while (words--) {
do { (void)(buf); } while (0);
buf++;
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_to_le32_array(u32 *buf, unsigned int words)
{
while (words--) {
do { (void)(buf); } while (0);
buf++;
}
}
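/*
 * be{16,32,64}_add_cpu below look dense because the cpu_to_be*() and
 * be*_to_cpu() macros have been fully expanded by the preprocessor; each
 * one simply byte-swaps the stored value, adds val, and swaps it back.
 */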
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void be16_add_cpu(__be16 *var, u16 val)
{
*var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void be32_add_cpu(__be32 *var, u32 val)
{
*var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))) ? ((__u32)( (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void be64_add_cpu(__be64 *var, u64 val)
{
*var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))) ? ((__u64)( (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
int i;
for (i = 0; i < len; i++)
dst[i] = (( __be32)(__builtin_constant_p((__u32)((src[i]))) ? ((__u32)( (((__u32)((src[i])) & (__u32)0x000000ffUL) << 24) | (((__u32)((src[i])) & (__u32)0x0000ff00UL) << 8) | (((__u32)((src[i])) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((src[i])) & (__u32)0xff000000UL) >> 24))) : __fswab32((src[i]))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
int i;
for (i = 0; i < len; i++)
dst[i] = (__builtin_constant_p((__u32)(( __u32)(__be32)(src[i]))) ? ((__u32)( (((__u32)(( __u32)(__be32)(src[i])) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(src[i])) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(src[i])) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(src[i])) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(src[i])));
}
# 12 "./include/linux/byteorder/little_endian.h" 2
# 6 "./arch/x86/include/uapi/asm/byteorder.h" 2
# 7 "./include/asm-generic/bitops/le.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long find_first_zero_bit_le(const void *addr,
unsigned long size)
{
return find_first_zero_bit(addr, size);
}
# 53 "./include/asm-generic/bitops/le.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_bit_le(int nr, const void *addr)
{
return (__builtin_constant_p((nr ^ 0)) ? constant_test_bit((nr ^ 0), (addr)) : variable_test_bit((nr ^ 0), (addr)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_bit_le(int nr, void *addr)
{
set_bit(nr ^ 0, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_bit_le(int nr, void *addr)
{
clear_bit(nr ^ 0, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __set_bit_le(int nr, void *addr)
{
__set_bit(nr ^ 0, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __clear_bit_le(int nr, void *addr)
{
__clear_bit(nr ^ 0, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_set_bit_le(int nr, void *addr)
{
return test_and_set_bit(nr ^ 0, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_clear_bit_le(int nr, void *addr)
{
return test_and_clear_bit(nr ^ 0, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __test_and_set_bit_le(int nr, void *addr)
{
return __test_and_set_bit(nr ^ 0, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __test_and_clear_bit_le(int nr, void *addr)
{
return __test_and_clear_bit(nr ^ 0, addr);
}
# 519 "./arch/x86/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/ext2-atomic-setbit.h" 1
# 521 "./arch/x86/include/asm/bitops.h" 2
# 20 "./include/linux/bitops.h" 2
# 43 "./include/linux/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_bitmask_order(unsigned int count)
{
int order;
order = fls(count);
return order;
}
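/*
 * hweight_long(): the constant-folding hweight macros are expanded inline
 * below; for non-constant arguments it reduces to __arch_hweight32() or
 * __arch_hweight64().
 */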
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? (__builtin_constant_p(w) ? ((((unsigned int) ((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))))) + ((unsigned int) ((!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))))))) : __arch_hweight32(w)) : (__builtin_constant_p(w) ? (((((unsigned int) ((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))))) + ((unsigned int) ((!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))))))) + ((((unsigned int) ((!!(((w) >> 32) & (1ULL << 0))) + (!!(((w) >> 32) & (1ULL << 1))) + (!!(((w) >> 32) & (1ULL << 2))) + (!!(((w) >> 32) & (1ULL << 3))) + (!!(((w) >> 32) & (1ULL << 4))) + (!!(((w) >> 32) & (1ULL << 5))) + (!!(((w) >> 32) & (1ULL << 6))) + (!!(((w) >> 32) & (1ULL << 7))))) + ((unsigned int) ((!!((((w) >> 32) >> 8) & (1ULL << 0))) + (!!((((w) >> 32) >> 8) & (1ULL << 1))) + (!!((((w) >> 32) >> 8) & (1ULL << 2))) + (!!((((w) >> 32) >> 8) & (1ULL << 3))) + (!!((((w) >> 32) >> 8) & (1ULL << 4))) + (!!((((w) >> 32) >> 8) & (1ULL << 5))) + (!!((((w) >> 32) >> 8) & (1ULL << 6))) + (!!((((w) >> 32) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!((((w) >> 32) >> 16) & (1ULL << 0))) + (!!((((w) >> 32) >> 16) & (1ULL << 1))) + (!!((((w) >> 32) >> 16) & (1ULL << 2))) + (!!((((w) >> 32) >> 16) & (1ULL << 3))) + (!!((((w) >> 32) >> 16) & (1ULL << 4))) + (!!((((w) >> 32) >> 16) & (1ULL << 5))) + (!!((((w) >> 32) >> 16) & (1ULL << 6))) + (!!((((w) >> 32) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 0))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 1))) + (!!(((((w) >> 32) >> 16) >> 8) & 
(1ULL << 2))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 3))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 4))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 5))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 6))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 7)))))))) : __arch_hweight64(w));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u64 rol64(__u64 word, unsigned int shift)
{
return (word << shift) | (word >> (64 - shift));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u64 ror64(__u64 word, unsigned int shift)
{
return (word >> shift) | (word << (64 - shift));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 ror32(__u32 word, unsigned int shift)
{
return (word >> shift) | (word << (32 - shift));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u16 rol16(__u16 word, unsigned int shift)
{
return (word << shift) | (word >> (16 - shift));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u16 ror16(__u16 word, unsigned int shift)
{
return (word >> shift) | (word << (16 - shift));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u8 rol8(__u8 word, unsigned int shift)
{
return (word << shift) | (word >> (8 - shift));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u8 ror8(__u8 word, unsigned int shift)
{
return (word >> shift) | (word << (8 - shift));
}
# 143 "./include/linux/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
return fls(l);
return fls64(l);
}
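/*
 * get_count_order(): smallest order such that (1 << order) >= count, i.e.
 * a round-up log2.  Example: count = 6 -> fls(6) - 1 = 2, and since 6 is
 * not a power of two the order is bumped to 3 (1 << 3 == 8 >= 6).
 * get_count_order_long() is the unsigned long variant and returns -1 for 0.
 */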
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_count_order(unsigned int count)
{
int order;
order = fls(count) - 1;
if (count & (count - 1))
order++;
return order;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_count_order_long(unsigned long l)
{
if (l == 0UL)
return -1;
else if (l & (l - 1UL))
return (int)fls_long(l);
else
return (int)fls_long(l) - 1;
}
# 201 "./include/linux/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __ffs64(u64 word)
{
return __ffs((unsigned long)word);
}
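/*
 * assign_bit()/__assign_bit(): set or clear bit @nr depending on @value.
 * The double-underscore form uses the non-atomic __set_bit()/__clear_bit()
 * helpers and is only safe when the bitmap is not modified concurrently.
 */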
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void assign_bit(long nr, volatile unsigned long *addr,
bool value)
{
if (value)
set_bit(nr, addr);
else
clear_bit(nr, addr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __assign_bit(long nr, volatile unsigned long *addr,
bool value)
{
if (value)
__set_bit(nr, addr);
else
__clear_bit(nr, addr);
}
# 277 "./include/linux/bitops.h"
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
# 12 "./include/linux/kernel.h" 2
# 1 "./include/linux/log2.h" 1
# 25 "./include/linux/log2.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
}
# 48 "./include/linux/log2.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
return 1UL << fls_long(n - 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
{
return 1UL << (fls_long(n) - 1);
}
# 201 "./include/linux/log2.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__))
int __order_base_2(unsigned long n)
{
return n > 1 ? ( __builtin_constant_p(n - 1) ? ( __builtin_constant_p(n - 1) ? ( (n - 1) < 2 ? 0 : (n - 1) & (1ULL << 63) ? 63 : (n - 1) & (1ULL << 62) ? 62 : (n - 1) & (1ULL << 61) ? 61 : (n - 1) & (1ULL << 60) ? 60 : (n - 1) & (1ULL << 59) ? 59 : (n - 1) & (1ULL << 58) ? 58 : (n - 1) & (1ULL << 57) ? 57 : (n - 1) & (1ULL << 56) ? 56 : (n - 1) & (1ULL << 55) ? 55 : (n - 1) & (1ULL << 54) ? 54 : (n - 1) & (1ULL << 53) ? 53 : (n - 1) & (1ULL << 52) ? 52 : (n - 1) & (1ULL << 51) ? 51 : (n - 1) & (1ULL << 50) ? 50 : (n - 1) & (1ULL << 49) ? 49 : (n - 1) & (1ULL << 48) ? 48 : (n - 1) & (1ULL << 47) ? 47 : (n - 1) & (1ULL << 46) ? 46 : (n - 1) & (1ULL << 45) ? 45 : (n - 1) & (1ULL << 44) ? 44 : (n - 1) & (1ULL << 43) ? 43 : (n - 1) & (1ULL << 42) ? 42 : (n - 1) & (1ULL << 41) ? 41 : (n - 1) & (1ULL << 40) ? 40 : (n - 1) & (1ULL << 39) ? 39 : (n - 1) & (1ULL << 38) ? 38 : (n - 1) & (1ULL << 37) ? 37 : (n - 1) & (1ULL << 36) ? 36 : (n - 1) & (1ULL << 35) ? 35 : (n - 1) & (1ULL << 34) ? 34 : (n - 1) & (1ULL << 33) ? 33 : (n - 1) & (1ULL << 32) ? 32 : (n - 1) & (1ULL << 31) ? 31 : (n - 1) & (1ULL << 30) ? 30 : (n - 1) & (1ULL << 29) ? 29 : (n - 1) & (1ULL << 28) ? 28 : (n - 1) & (1ULL << 27) ? 27 : (n - 1) & (1ULL << 26) ? 26 : (n - 1) & (1ULL << 25) ? 25 : (n - 1) & (1ULL << 24) ? 24 : (n - 1) & (1ULL << 23) ? 23 : (n - 1) & (1ULL << 22) ? 22 : (n - 1) & (1ULL << 21) ? 21 : (n - 1) & (1ULL << 20) ? 20 : (n - 1) & (1ULL << 19) ? 19 : (n - 1) & (1ULL << 18) ? 18 : (n - 1) & (1ULL << 17) ? 17 : (n - 1) & (1ULL << 16) ? 16 : (n - 1) & (1ULL << 15) ? 15 : (n - 1) & (1ULL << 14) ? 14 : (n - 1) & (1ULL << 13) ? 13 : (n - 1) & (1ULL << 12) ? 12 : (n - 1) & (1ULL << 11) ? 11 : (n - 1) & (1ULL << 10) ? 10 : (n - 1) & (1ULL << 9) ? 9 : (n - 1) & (1ULL << 8) ? 8 : (n - 1) & (1ULL << 7) ? 7 : (n - 1) & (1ULL << 6) ? 6 : (n - 1) & (1ULL << 5) ? 5 : (n - 1) & (1ULL << 4) ? 4 : (n - 1) & (1ULL << 3) ? 3 : (n - 1) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(n - 1) <= 4) ? __ilog2_u32(n - 1) : __ilog2_u64(n - 1) ) + 1 : 0;
}
# 13 "./include/linux/kernel.h" 2
# 1 "./include/linux/typecheck.h" 1
# 14 "./include/linux/kernel.h" 2
# 1 "./include/linux/printk.h" 1
# 1 "./include/linux/init.h" 1
# 116 "./include/linux/init.h"
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
typedef int initcall_entry_t;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) initcall_t initcall_from_entry(initcall_entry_t *entry)
{
return offset_to_ptr(entry);
}
# 135 "./include/linux/init.h"
extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
typedef void (*ctor_fn_t)(void);
extern int do_one_initcall(initcall_t fn);
extern char __attribute__((__section__(".init.data"))) boot_command_line[];
extern char *saved_command_line;
extern unsigned int reset_devices;
void setup_arch(char **);
void prepare_namespace(void);
int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) init_rootfs(void);
extern bool rodata_enabled;
void mark_rodata_ro(void);
extern void (*late_time_init)(void);
extern bool initcall_debug;
# 238 "./include/linux/init.h"
struct obs_kernel_param {
const char *str;
int (*setup_func)(char *);
int early;
};
# 287 "./include/linux/init.h"
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) parse_early_param(void);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) parse_early_options(char *cmdline);
# 7 "./include/linux/printk.h" 2
# 1 "./include/linux/kern_levels.h" 1
# 8 "./include/linux/printk.h" 2
# 1 "./include/linux/cache.h" 1
# 1 "./include/uapi/linux/kernel.h" 1
# 1 "./include/uapi/linux/sysinfo.h" 1
struct sysinfo {
__kernel_long_t uptime;
__kernel_ulong_t loads[3];
__kernel_ulong_t totalram;
__kernel_ulong_t freeram;
__kernel_ulong_t sharedram;
__kernel_ulong_t bufferram;
__kernel_ulong_t totalswap;
__kernel_ulong_t freeswap;
__u16 procs;
__u16 pad;
__kernel_ulong_t totalhigh;
__kernel_ulong_t freehigh;
__u32 mem_unit;
char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)];
};
# 6 "./include/uapi/linux/kernel.h" 2
# 6 "./include/linux/cache.h" 2
# 1 "./arch/x86/include/asm/cache.h" 1
# 7 "./include/linux/cache.h" 2
# 10 "./include/linux/printk.h" 2
extern const char linux_banner[];
extern const char linux_proc_banner[];
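/*
 * printk level parsing: a message may start with the two bytes
 * '\001' (KERN_SOH) followed by a level character ('0'..'7', 'd' or 'c').
 * printk_get_level() returns that character (or 0), printk_skip_level()
 * steps past one such prefix, and printk_skip_headers() strips them all.
 * Example: a buffer beginning {'\001', '6', ...} yields '6' from
 * printk_get_level(), and printk_skip_level() returns buffer + 2.
 */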
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int printk_get_level(const char *buffer)
{
if (buffer[0] == '\001' && buffer[1]) {
switch (buffer[1]) {
case '0' ... '7':
case 'd':
case 'c':
return buffer[1];
}
}
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *printk_skip_level(const char *buffer)
{
if (printk_get_level(buffer))
return buffer + 2;
return buffer;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *printk_skip_headers(const char *buffer)
{
while (printk_get_level(buffer))
buffer = printk_skip_level(buffer);
return buffer;
}
# 63 "./include/linux/printk.h"
extern int console_printk[];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void console_silent(void)
{
(console_printk[0]) = 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void console_verbose(void)
{
if ((console_printk[0]))
(console_printk[0]) = 15;
}
extern char devkmsg_log_str[];
struct ctl_table;
struct va_format {
const char *fmt;
va_list *va;
};
# 141 "./include/linux/printk.h"
extern __attribute__((__format__(printf, 1, 2)))
void early_printk(const char *fmt, ...);
extern void printk_nmi_enter(void);
extern void printk_nmi_exit(void);
extern void printk_nmi_direct_enter(void);
extern void printk_nmi_direct_exit(void);
# 161 "./include/linux/printk.h"
__attribute__((__format__(printf, 5, 0)))
int vprintk_emit(int facility, int level,
const char *dict, size_t dictlen,
const char *fmt, va_list args);
__attribute__((__format__(printf, 1, 0)))
int vprintk(const char *fmt, va_list args);
__attribute__((__format__(printf, 1, 2))) __attribute__((__cold__))
int printk(const char *fmt, ...);
__attribute__((__format__(printf, 1, 2))) __attribute__((__cold__)) int printk_deferred(const char *fmt, ...);
extern int __printk_ratelimit(const char *func);
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
extern int printk_delay_msec;
extern int dmesg_restrict;
extern int
devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf,
size_t *lenp, loff_t *ppos);
extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
void log_buf_vmcoreinfo_setup(void);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) setup_log_buf(int early);
__attribute__((__format__(printf, 1, 2))) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern void dump_stack(void) __attribute__((__cold__));
extern void printk_safe_init(void);
extern void printk_safe_flush(void);
extern void printk_safe_flush_on_panic(void);
# 284 "./include/linux/printk.h"
extern int kptr_restrict;
# 476 "./include/linux/printk.h"
extern const struct file_operations kmsg_fops;
enum {
DUMP_PREFIX_NONE,
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii);
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
const void *buf, size_t len);
# 521 "./include/linux/printk.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void print_hex_dump_debug(const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
}
# 15 "./include/linux/kernel.h" 2
# 1 "./include/linux/build_bug.h" 1
# 16 "./include/linux/kernel.h" 2
# 236 "./include/linux/kernel.h"
struct completion;
struct pt_regs;
struct user;
extern int _cond_resched(void);
# 264 "./include/linux/kernel.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ___might_sleep(const char *file, int line,
int preempt_offset) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __might_sleep(const char *file, int line,
int preempt_offset) { }
# 311 "./include/linux/kernel.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 reciprocal_scale(u32 val, u32 ep_ro)
{
return (u32)(((u64) val * ep_ro) >> 32);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void might_fault(void) { }
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
__attribute__((__format__(printf, 1, 2)))
void panic(const char *fmt, ...) __attribute__((__noreturn__)) __attribute__((__cold__));
void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
void do_exit(long error_code) __attribute__((__noreturn__));
void complete_and_exit(struct completion *, long) __attribute__((__noreturn__));
void refcount_error_report(struct pt_regs *regs, const char *err);
int __attribute__((__warn_unused_result__)) _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __attribute__((__warn_unused_result__)) _kstrtol(const char *s, unsigned int base, long *res);
int __attribute__((__warn_unused_result__)) kstrtoull(const char *s, unsigned int base, unsigned long long *res);
int __attribute__((__warn_unused_result__)) kstrtoll(const char *s, unsigned int base, long long *res);
# 366 "./include/linux/kernel.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
if (sizeof(unsigned long) == sizeof(unsigned long long) &&
__alignof__(unsigned long) == __alignof__(unsigned long long))
return kstrtoull(s, base, (unsigned long long *)res);
else
return _kstrtoul(s, base, res);
}
# 395 "./include/linux/kernel.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtol(const char *s, unsigned int base, long *res)
{
if (sizeof(long) == sizeof(long long) &&
__alignof__(long) == __alignof__(long long))
return kstrtoll(s, base, (long long *)res);
else
return _kstrtol(s, base, res);
}
int __attribute__((__warn_unused_result__)) kstrtouint(const char *s, unsigned int base, unsigned int *res);
int __attribute__((__warn_unused_result__)) kstrtoint(const char *s, unsigned int base, int *res);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtou64(const char *s, unsigned int base, u64 *res)
{
return kstrtoull(s, base, res);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtos64(const char *s, unsigned int base, s64 *res)
{
return kstrtoll(s, base, res);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtou32(const char *s, unsigned int base, u32 *res)
{
return kstrtouint(s, base, res);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtos32(const char *s, unsigned int base, s32 *res)
{
return kstrtoint(s, base, res);
}
int __attribute__((__warn_unused_result__)) kstrtou16(const char *s, unsigned int base, u16 *res);
int __attribute__((__warn_unused_result__)) kstrtos16(const char *s, unsigned int base, s16 *res);
int __attribute__((__warn_unused_result__)) kstrtou8(const char *s, unsigned int base, u8 *res);
int __attribute__((__warn_unused_result__)) kstrtos8(const char *s, unsigned int base, s8 *res);
int __attribute__((__warn_unused_result__)) kstrtobool(const char *s, bool *res);
int __attribute__((__warn_unused_result__)) kstrtoull_from_user(const char *s, size_t count, unsigned int base, unsigned long long *res);
int __attribute__((__warn_unused_result__)) kstrtoll_from_user(const char *s, size_t count, unsigned int base, long long *res);
int __attribute__((__warn_unused_result__)) kstrtoul_from_user(const char *s, size_t count, unsigned int base, unsigned long *res);
int __attribute__((__warn_unused_result__)) kstrtol_from_user(const char *s, size_t count, unsigned int base, long *res);
int __attribute__((__warn_unused_result__)) kstrtouint_from_user(const char *s, size_t count, unsigned int base, unsigned int *res);
int __attribute__((__warn_unused_result__)) kstrtoint_from_user(const char *s, size_t count, unsigned int base, int *res);
int __attribute__((__warn_unused_result__)) kstrtou16_from_user(const char *s, size_t count, unsigned int base, u16 *res);
int __attribute__((__warn_unused_result__)) kstrtos16_from_user(const char *s, size_t count, unsigned int base, s16 *res);
int __attribute__((__warn_unused_result__)) kstrtou8_from_user(const char *s, size_t count, unsigned int base, u8 *res);
int __attribute__((__warn_unused_result__)) kstrtos8_from_user(const char *s, size_t count, unsigned int base, s8 *res);
int __attribute__((__warn_unused_result__)) kstrtobool_from_user(const char *s, size_t count, bool *res);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtou64_from_user(const char *s, size_t count, unsigned int base, u64 *res)
{
return kstrtoull_from_user(s, count, base, res);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtos64_from_user(const char *s, size_t count, unsigned int base, s64 *res)
{
return kstrtoll_from_user(s, count, base, res);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtou32_from_user(const char *s, size_t count, unsigned int base, u32 *res)
{
return kstrtouint_from_user(s, count, base, res);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtos32_from_user(const char *s, size_t count, unsigned int base, s32 *res)
{
return kstrtoint_from_user(s, count, base, res);
}
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
extern int num_to_str(char *buf, int size,
unsigned long long num, unsigned int width);
extern __attribute__((__format__(printf, 2, 3))) int sprintf(char *buf, const char * fmt, ...);
extern __attribute__((__format__(printf, 2, 0))) int vsprintf(char *buf, const char *, va_list);
extern __attribute__((__format__(printf, 3, 4)))
int snprintf(char *buf, size_t size, const char *fmt, ...);
extern __attribute__((__format__(printf, 3, 0)))
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __attribute__((__format__(printf, 3, 4)))
int scnprintf(char *buf, size_t size, const char *fmt, ...);
extern __attribute__((__format__(printf, 3, 0)))
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __attribute__((__format__(printf, 2, 3))) __attribute__((__malloc__))
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern __attribute__((__format__(printf, 2, 0))) __attribute__((__malloc__))
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __attribute__((__format__(printf, 2, 0)))
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
extern __attribute__((__format__(scanf, 2, 3)))
int sscanf(const char *, const char *, ...);
extern __attribute__((__format__(scanf, 2, 0)))
int vsscanf(const char *, const char *, va_list);
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);
extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
extern int init_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
unsigned long int_sqrt(unsigned long);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 int_sqrt64(u64 x)
{
return (u32)int_sqrt(x);
}
extern void bust_spinlocks(int yes);
extern int oops_in_progress;
extern int panic_timeout;
extern unsigned long panic_print;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
extern int sysctl_panic_on_rcu_stall;
extern int sysctl_panic_on_stackoverflow;
extern bool crash_kexec_post_notifiers;
extern atomic_t panic_cpu;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_arch_panic_timeout(int timeout, int arch_default_timeout)
{
if (panic_timeout == arch_default_timeout)
panic_timeout = timeout;
}
extern const char *print_tainted(void);
enum lockdep_ok {
LOCKDEP_STILL_OK,
LOCKDEP_NOW_UNRELIABLE
};
extern void add_taint(unsigned flag, enum lockdep_ok);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;
extern enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
SYSTEM_SUSPEND,
} system_state;
# 604 "./include/linux/kernel.h"
struct taint_flag {
char c_true;
char c_false;
bool module;
};
extern const struct taint_flag taint_flags[18];
extern const char hex_asc[];
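/*
 * hex_byte_pack() writes the two hex digits of @byte (high nibble first)
 * into @buf and returns the advanced pointer; hex_byte_pack_upper() is the
 * uppercase variant.  Example: hex_byte_pack(buf, 0x3f) stores '3', 'f'.
 */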
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char *hex_byte_pack(char *buf, u8 byte)
{
*buf++ = hex_asc[((byte) & 0xf0) >> 4];
*buf++ = hex_asc[((byte) & 0x0f)];
return buf;
}
extern const char hex_asc_upper[];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char *hex_byte_pack_upper(char *buf, u8 byte)
{
*buf++ = hex_asc_upper[((byte) & 0xf0) >> 4];
*buf++ = hex_asc_upper[((byte) & 0x0f)];
return buf;
}
extern int hex_to_bin(char ch);
extern int __attribute__((__warn_unused_result__)) hex2bin(u8 *dst, const char *src, size_t count);
extern char *bin2hex(char *dst, const void *src, size_t count);
bool mac_pton(const char *s, u8 *mac);
# 660 "./include/linux/kernel.h"
enum ftrace_dump_mode {
DUMP_NONE,
DUMP_ALL,
DUMP_ORIG,
};
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);
extern void tracing_start(void);
extern void tracing_stop(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__format__(printf, 1, 2)))
void ____trace_printk_check_format(const char *fmt, ...)
{
}
# 739 "./include/linux/kernel.h"
extern __attribute__((__format__(printf, 2, 3)))
int __trace_bprintk(unsigned long ip, const char *fmt, ...);
extern __attribute__((__format__(printf, 2, 3)))
int __trace_printk(unsigned long ip, const char *fmt, ...);
# 780 "./include/linux/kernel.h"
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
extern void trace_dump_stack(int skip);
# 802 "./include/linux/kernel.h"
extern __attribute__((__format__(printf, 2, 0))) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
extern __attribute__((__format__(printf, 2, 0))) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
# 46 "./arch/x86/include/asm/percpu.h" 2
# 88 "./arch/x86/include/asm/percpu.h"
extern void __bad_percpu_size(void);
# 512 "./arch/x86/include/asm/percpu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool x86_this_cpu_constant_test_bit(unsigned int nr,
const unsigned long *addr)
{
unsigned long *a =
(unsigned long *)addr + nr / 64;
return ((1UL << (nr % 64)) & ({ typeof(*a) pfo_ret__; switch (sizeof(*a)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (*a)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; default: __bad_percpu_size(); } pfo_ret__; })) != 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool x86_this_cpu_variable_test_bit(int nr,
const unsigned long *addr)
{
bool oldbit;
asm volatile("btl ""%%""gs"":" "%" "2"",%1"
"\n\tset" "c" " %[_cc_" "c" "]\n"
: [_cc_c] "=qm" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
}
# 1 "./include/asm-generic/percpu.h" 1
# 1 "./include/linux/threads.h" 1
# 7 "./include/asm-generic/percpu.h" 2
# 1 "./include/linux/percpu-defs.h" 1
# 308 "./include/linux/percpu-defs.h"
extern void __bad_size_call_parameter(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __this_cpu_preempt_check(const char *op) { }
# 8 "./include/asm-generic/percpu.h" 2
# 19 "./include/asm-generic/percpu.h"
extern unsigned long __per_cpu_offset[64];
# 48 "./include/asm-generic/percpu.h"
extern void setup_per_cpu_areas(void);
# 545 "./arch/x86/include/asm/percpu.h" 2
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(unsigned long) this_cpu_off;
# 7 "./arch/x86/include/asm/current.h" 2
struct task_struct;
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) current_task;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) struct task_struct *get_current(void)
{
return ({ typeof(current_task) pfo_ret__; switch (sizeof(current_task)) { case 1: asm("mov" "b ""%%""gs"":" "%" "P1"",%0" : "=q" (pfo_ret__) : "p" (&(current_task))); break; case 2: asm("mov" "w ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 4: asm("mov" "l ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 8: asm("mov" "q ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; default: __bad_percpu_size(); } pfo_ret__; });
}
# 13 "./include/linux/sched.h" 2
# 1 "./include/linux/pid.h" 1
# 1 "./include/linux/rculist.h" 1
# 10 "./include/linux/rculist.h"
# 1 "./include/linux/list.h" 1
# 1 "./include/linux/poison.h" 1
# 8 "./include/linux/list.h" 2
# 1 "./include/linux/const.h" 1
# 1 "./include/uapi/linux/const.h" 1
# 5 "./include/linux/const.h" 2
# 9 "./include/linux/list.h" 2
# 26 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void INIT_LIST_HEAD(struct list_head *list)
{
({ union { typeof(list->next) __val; char __c[1]; } __u = { .__val = ( typeof(list->next)) (list) }; __write_once_size(&(list->next), __u.__c, sizeof(list->next)); __u.__val; });
list->prev = list;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __list_add_valid(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __list_del_entry_valid(struct list_head *entry)
{
return true;
}
# 56 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
if (!__list_add_valid(new, prev, next))
return;
next->prev = new;
new->next = next;
new->prev = prev;
({ union { typeof(prev->next) __val; char __c[1]; } __u = { .__val = ( typeof(prev->next)) (new) }; __write_once_size(&(prev->next), __u.__c, sizeof(prev->next)); __u.__val; });
}
# 77 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
# 91 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}
# 103 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
({ union { typeof(prev->next) __val; char __c[1]; } __u = { .__val = ( typeof(prev->next)) (next) }; __write_once_size(&(prev->next), __u.__c, sizeof(prev->next)); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_del_entry(struct list_head *entry)
{
if (!__list_del_entry_valid(entry))
return;
__list_del(entry->prev, entry->next);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_del(struct list_head *entry)
{
__list_del_entry(entry);
entry->next = ((void *) 0x100 + (0xdead000000000000UL));
entry->prev = ((void *) 0x200 + (0xdead000000000000UL));
}
# 137 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_replace(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->next->prev = new;
new->prev = old->prev;
new->prev->next = new;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_replace_init(struct list_head *old,
struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_del_init(struct list_head *entry)
{
__list_del_entry(entry);
INIT_LIST_HEAD(entry);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_move(struct list_head *list, struct list_head *head)
{
__list_del_entry(list);
list_add(list, head);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del_entry(list);
list_add_tail(list, head);
}
# 195 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_bulk_move_tail(struct list_head *head,
struct list_head *first,
struct list_head *last)
{
first->prev->next = last->next;
last->next->prev = first->prev;
head->prev->next = first;
first->prev = head->prev;
last->next = head;
head->prev = last;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int list_is_last(const struct list_head *list,
const struct list_head *head)
{
return list->next == head;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int list_empty(const struct list_head *head)
{
return ({ union { typeof(head->next) __val; char __c[1]; } __u; if (1) __read_once_size(&(head->next), __u.__c, sizeof(head->next)); else __read_once_size_nocheck(&(head->next), __u.__c, sizeof(head->next)); do { } while (0); __u.__val; }) == head;
}
# 242 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int list_empty_careful(const struct list_head *head)
{
struct list_head *next = head->next;
return (next == head) && (next == head->prev);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_rotate_left(struct list_head *head)
{
struct list_head *first;
if (!list_empty(head)) {
first = head->next;
list_move_tail(first, head);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int list_is_singular(const struct list_head *head)
{
return !list_empty(head) && (head->next == head->prev);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
struct list_head *new_first = entry->next;
list->next = head->next;
list->next->prev = list;
list->prev = entry;
entry->next = list;
head->next = new_first;
new_first->prev = head;
}
# 297 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
if (list_empty(head))
return;
if (list_is_singular(head) &&
(head->next != entry && head != entry))
return;
if (entry == head)
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
}
# 325 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_cut_before(struct list_head *list,
struct list_head *head,
struct list_head *entry)
{
if (head->next == entry) {
INIT_LIST_HEAD(list);
return;
}
list->next = head->next;
list->next->prev = list;
list->prev = entry->prev;
list->prev->next = list;
head->next = entry;
entry->prev = head;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
first->prev = prev;
prev->next = first;
last->next = next;
next->prev = last;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice(const struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head, head->next);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice_tail(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head->prev, head);
}
# 386 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head, head->next);
INIT_LIST_HEAD(list);
}
}
# 403 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice_tail_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head->prev, head);
INIT_LIST_HEAD(list);
}
}
# 681 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = ((void *)0);
h->pprev = ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hlist_empty(const struct hlist_head *h)
{
return !({ union { typeof(h->first) __val; char __c[1]; } __u; if (1) __read_once_size(&(h->first), __u.__c, sizeof(h->first)); else __read_once_size_nocheck(&(h->first), __u.__c, sizeof(h->first)); do { } while (0); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
({ union { typeof(*pprev) __val; char __c[1]; } __u = { .__val = ( typeof(*pprev)) (next) }; __write_once_size(&(*pprev), __u.__c, sizeof(*pprev)); __u.__val; });
if (next)
next->pprev = pprev;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = ((void *) 0x100 + (0xdead000000000000UL));
n->pprev = ((void *) 0x200 + (0xdead000000000000UL));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
({ union { typeof(h->first) __val; char __c[1]; } __u = { .__val = ( typeof(h->first)) (n) }; __write_once_size(&(h->first), __u.__c, sizeof(h->first)); __u.__val; });
n->pprev = &h->first;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
({ union { typeof(*(n->pprev)) __val; char __c[1]; } __u = { .__val = ( typeof(*(n->pprev))) (n) }; __write_once_size(&(*(n->pprev)), __u.__c, sizeof(*(n->pprev))); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
({ union { typeof(prev->next) __val; char __c[1]; } __u = { .__val = ( typeof(prev->next)) (n) }; __write_once_size(&(prev->next), __u.__c, sizeof(prev->next)); __u.__val; });
n->pprev = &prev->next;
if (n->next)
n->next->pprev = &n->next;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_fake(struct hlist_node *n)
{
n->pprev = &n->next;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
{
return !n->next && n->pprev == &h->first;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_move_list(struct hlist_head *old,
struct hlist_head *new)
{
new->first = old->first;
if (new->first)
new->first->pprev = &new->first;
old->first = ((void *)0);
}
# 11 "./include/linux/rculist.h" 2
# 1 "./include/linux/rcupdate.h" 1
# 38 "./include/linux/rcupdate.h"
# 1 "./include/linux/atomic.h" 1
# 1 "./arch/x86/include/asm/atomic.h" 1
# 1 "./arch/x86/include/asm/cmpxchg.h" 1
# 13 "./arch/x86/include/asm/cmpxchg.h"
extern void __xchg_wrong_size(void);
extern void __cmpxchg_wrong_size(void);
extern void __xadd_wrong_size(void);
extern void __add_wrong_size(void);
# 145 "./arch/x86/include/asm/cmpxchg.h"
# 1 "./arch/x86/include/asm/cmpxchg_64.h" 1
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_64bit(volatile u64 *ptr, u64 val)
{
*ptr = val;
}
# 146 "./arch/x86/include/asm/cmpxchg.h" 2
# 9 "./arch/x86/include/asm/atomic.h" 2
# 25 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_read(const atomic_t *v)
{
return ({ union { typeof((v)->counter) __val; char __c[1]; } __u; if (1) __read_once_size(&((v)->counter), __u.__c, sizeof((v)->counter)); else __read_once_size_nocheck(&((v)->counter), __u.__c, sizeof((v)->counter)); do { } while (0); __u.__val; });
}
# 41 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic_set(atomic_t *v, int i)
{
({ union { typeof(v->counter) __val; char __c[1]; } __u = { .__val = ( typeof(v->counter)) (i) }; __write_once_size(&(v->counter), __u.__c, sizeof(v->counter)); __u.__val; });
}
# 53 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic_add(int i, atomic_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
# 67 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic_sub(int i, atomic_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
# 83 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "subl" " %[val], " "%[var]" "; j" "e" " %l[cc_label]" : : [var] "m" (v->counter), [val] "er" (i) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 95 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic_inc(atomic_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "incl %0"
: "+m" (v->counter));
}
# 108 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic_dec(atomic_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "decl %0"
: "+m" (v->counter));
}
# 123 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_atomic_dec_and_test(atomic_t *v)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "decl" " " "%[var]" "; j" "e" " %l[cc_label]" : : [var] "m" (v->counter) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 137 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_atomic_inc_and_test(atomic_t *v)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "incl" " " "%[var]" "; j" "e" " %l[cc_label]" : : [var] "m" (v->counter) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 152 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_atomic_add_negative(int i, atomic_t *v)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "addl" " %[val], " "%[var]" "; j" "s" " %l[cc_label]" : : [var] "m" (v->counter), [val] "er" (i) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 165 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_add_return(int i, atomic_t *v)
{
return i + ({ __typeof__ (*(((&v->counter)))) __ret = (((i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; });
}
# 177 "./arch/x86/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_add_return(-i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_fetch_add(int i, atomic_t *v)
{
return ({ __typeof__ (*(((&v->counter)))) __ret = (((i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return ({ __typeof__ (*(((&v->counter)))) __ret = (((-i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return ({ __typeof__(*((&v->counter))) __ret; __typeof__(*((&v->counter))) __old = ((old)); __typeof__(*((&v->counter))) __new = ((new)); switch ((sizeof(*(&v->counter)))) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 8: { volatile u64 *__ptr = (volatile u64 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgq %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return ({ bool success; __typeof__(((&v->counter))) _old = (__typeof__(((&v->counter))))(((old))); __typeof__(*(((&v->counter)))) __old = *_old; __typeof__(*(((&v->counter)))) __new = (((new))); switch ((sizeof(*(&v->counter)))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgb %[new], %[ptr]" "\n\tset" "z" " %[_cc_" "z" "]\n" : [_cc_z] "=qm" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "q" (__new) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgw %[new], %[ptr]" "\n\tset" "z" " %[_cc_" "z" "]\n" : [_cc_z] "=qm" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgl %[new], %[ptr]" "\n\tset" "z" " %[_cc_" "z" "]\n" : [_cc_z] "=qm" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } case 8: { volatile u64 *__ptr = (volatile u64 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgq %[new], %[ptr]" "\n\tset" "z" " %[_cc_" "z" "]\n" : [_cc_z] "=qm" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } default: __cmpxchg_wrong_size(); } if (__builtin_expect(!!(!success), 0)) *_old = __old; __builtin_expect(!!(success), 1); });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_atomic_xchg(atomic_t *v, int new)
{
return ({ __typeof__ (*((&v->counter))) __ret = ((new)); switch (sizeof(*((&v->counter)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic_and(int i, atomic_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "andl %1,%0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_atomic_fetch_and(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic_or(int i, atomic_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "orl %1,%0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_atomic_fetch_or(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic_xor(int i, atomic_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xorl %1,%0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_atomic_fetch_xor(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));
return val;
}
# 1 "./arch/x86/include/asm/atomic64_64.h" 1
# 20 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_read(const atomic64_t *v)
{
return ({ union { typeof((v)->counter) __val; char __c[1]; } __u; if (1) __read_once_size(&((v)->counter), __u.__c, sizeof((v)->counter)); else __read_once_size_nocheck(&((v)->counter), __u.__c, sizeof((v)->counter)); do { } while (0); __u.__val; });
}
# 32 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_set(atomic64_t *v, long i)
{
({ union { typeof(v->counter) __val; char __c[1]; } __u = { .__val = ( typeof(v->counter)) (i) }; __write_once_size(&(v->counter), __u.__c, sizeof(v->counter)); __u.__val; });
}
# 44 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic64_add(long i, atomic64_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "addq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
}
# 58 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_sub(long i, atomic64_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "subq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
}
# 74 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "subq" " %[val], " "%[var]" "; j" "e" " %l[cc_label]" : : [var] "m" (v->counter), [val] "er" (i) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 86 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic64_inc(atomic64_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "incq %0"
: "=m" (v->counter)
: "m" (v->counter));
}
# 100 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic64_dec(atomic64_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "decq %0"
: "=m" (v->counter)
: "m" (v->counter));
}
# 116 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_atomic64_dec_and_test(atomic64_t *v)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "decq" " " "%[var]" "; j" "e" " %l[cc_label]" : : [var] "m" (v->counter) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 130 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_atomic64_inc_and_test(atomic64_t *v)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "incq" " " "%[var]" "; j" "e" " %l[cc_label]" : : [var] "m" (v->counter) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 145 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "addq" " %[val], " "%[var]" "; j" "s" " %l[cc_label]" : : [var] "m" (v->counter), [val] "er" (i) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
# 158 "./arch/x86/include/asm/atomic64_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long arch_atomic64_add_return(long i, atomic64_t *v)
{
return i + ({ __typeof__ (*(((&v->counter)))) __ret = (((i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_sub_return(long i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_fetch_add(long i, atomic64_t *v)
{
return ({ __typeof__ (*(((&v->counter)))) __ret = (((i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_fetch_sub(long i, atomic64_t *v)
{
return ({ __typeof__ (*(((&v->counter)))) __ret = (((-i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
return ({ __typeof__(*((&v->counter))) __ret; __typeof__(*((&v->counter))) __old = ((old)); __typeof__(*((&v->counter))) __new = ((new)); switch ((sizeof(*(&v->counter)))) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 8: { volatile u64 *__ptr = (volatile u64 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgq %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
return ({ bool success; __typeof__(((&v->counter))) _old = (__typeof__(((&v->counter))))(((old))); __typeof__(*(((&v->counter)))) __old = *_old; __typeof__(*(((&v->counter)))) __new = (((new))); switch ((sizeof(*(&v->counter)))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgb %[new], %[ptr]" "\n\tset" "z" " %[_cc_" "z" "]\n" : [_cc_z] "=qm" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "q" (__new) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgw %[new], %[ptr]" "\n\tset" "z" " %[_cc_" "z" "]\n" : [_cc_z] "=qm" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgl %[new], %[ptr]" "\n\tset" "z" " %[_cc_" "z" "]\n" : [_cc_z] "=qm" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } case 8: { volatile u64 *__ptr = (volatile u64 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgq %[new], %[ptr]" "\n\tset" "z" " %[_cc_" "z" "]\n" : [_cc_z] "=qm" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } default: __cmpxchg_wrong_size(); } if (__builtin_expect(!!(!success), 0)) *_old = __old; __builtin_expect(!!(success), 1); });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_xchg(atomic64_t *v, long new)
{
return ({ __typeof__ (*((&v->counter))) __ret = ((new)); switch (sizeof(*((&v->counter)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_and(long i, atomic64_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "andq %1,%0"
: "+m" (v->counter)
: "er" (i)
: "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_fetch_and(long i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
do {
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_or(long i, atomic64_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "orq %1,%0"
: "+m" (v->counter)
: "er" (i)
: "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_fetch_or(long i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
do {
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_xor(long i, atomic64_t *v)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xorq %1,%0"
: "+m" (v->counter)
: "er" (i)
: "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_atomic64_fetch_xor(long i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
do {
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
# 263 "./arch/x86/include/asm/atomic.h" 2
# 1 "./include/asm-generic/atomic-instrumented.h" 1
# 19 "./include/asm-generic/atomic-instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_read(const atomic_t *v)
{
kasan_check_read(v, sizeof(*v));
return arch_atomic_read(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_read(const atomic64_t *v)
{
kasan_check_read(v, sizeof(*v));
return arch_atomic64_read(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_set(atomic_t *v, int i)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_set(v, i);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic64_set(atomic64_t *v, s64 i)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_set(v, i);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_xchg(atomic_t *v, int i)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_xchg(atomic64_t *v, s64 i)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_cmpxchg(atomic_t *v, int old, int new)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
kasan_check_write(v, sizeof(*v));
kasan_check_read(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
kasan_check_write(v, sizeof(*v));
kasan_check_read(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
# 107 "./include/asm-generic/atomic-instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_inc(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_inc(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic64_inc(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_dec(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_dec(v);
}
# 141 "./include/asm-generic/atomic-instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_add(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic64_add(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_sub(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic64_sub(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_and(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic64_and(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_or(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic64_or(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_xor(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic64_xor(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
# 257 "./include/asm-generic/atomic-instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic_dec_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic64_dec_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic_inc_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic64_inc_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_add_return(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_add_return(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_sub_return(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_sub_return(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_fetch_add(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_fetch_sub(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_fetch_and(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_fetch_and(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_fetch_or(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_fetch_or(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int atomic_fetch_xor(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic_sub_and_test(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic_add_negative(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool atomic64_add_negative(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
# 266 "./arch/x86/include/asm/atomic.h" 2
# 8 "./include/linux/atomic.h" 2
# 573 "./include/linux/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c = atomic_read(v);
do {
if (__builtin_expect(!!(c == u), 0))
break;
} while (!atomic_try_cmpxchg(v, &c, c + a));
return c;
}
# 595 "./include/linux/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool atomic_add_unless(atomic_t *v, int a, int u)
{
return atomic_fetch_add_unless(v, a, u) != u;
}
# 674 "./include/linux/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool atomic_inc_unless_negative(atomic_t *v)
{
int c = atomic_read(v);
do {
if (__builtin_expect(!!(c < 0), 0))
return false;
} while (!atomic_try_cmpxchg(v, &c, c + 1));
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool atomic_dec_unless_positive(atomic_t *v)
{
int c = atomic_read(v);
do {
if (__builtin_expect(!!(c > 0), 0))
return false;
} while (!atomic_try_cmpxchg(v, &c, c - 1));
return true;
}
# 709 "./include/linux/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int atomic_dec_if_positive(atomic_t *v)
{
int dec, c = atomic_read(v);
do {
dec = c - 1;
if (__builtin_expect(!!(dec < 0), 0))
break;
} while (!atomic_try_cmpxchg(v, &c, dec));
return dec;
}
# 1161 "./include/linux/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
long long u)
{
long long c = atomic64_read(v);
do {
if (__builtin_expect(!!(c == u), 0))
break;
} while (!atomic64_try_cmpxchg(v, &c, c + a));
return c;
}
# 1184 "./include/linux/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
return atomic64_fetch_add_unless(v, a, u) != u;
}
# 1263 "./include/linux/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool atomic64_inc_unless_negative(atomic64_t *v)
{
long long c = atomic64_read(v);
do {
if (__builtin_expect(!!(c < 0), 0))
return false;
} while (!atomic64_try_cmpxchg(v, &c, c + 1));
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool atomic64_dec_unless_positive(atomic64_t *v)
{
long long c = atomic64_read(v);
do {
if (__builtin_expect(!!(c > 0), 0))
return false;
} while (!atomic64_try_cmpxchg(v, &c, c - 1));
return true;
}
# 1298 "./include/linux/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long long atomic64_dec_if_positive(atomic64_t *v)
{
long long dec, c = atomic64_read(v);
do {
dec = c - 1;
if (__builtin_expect(!!(dec < 0), 0))
break;
} while (!atomic64_try_cmpxchg(v, &c, dec));
return dec;
}
# 1 "./include/asm-generic/atomic-long.h" 1
# 24 "./include/asm-generic/atomic-long.h"
typedef atomic64_t atomic_long_t;
# 47 "./include/asm-generic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_read(const atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_read(v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_read_acquire(const atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)({ typeof(*&(v)->counter) ___p1 = ({ union { typeof(*&(v)->counter) __val; char __c[1]; } __u; if (1) __read_once_size(&(*&(v)->counter), __u.__c, sizeof(*&(v)->counter)); else __read_once_size_nocheck(&(*&(v)->counter), __u.__c, sizeof(*&(v)->counter)); do { } while (0); __u.__val; }); do { extern void __compiletime_assert_48(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_48(); } while (0); __asm__ __volatile__("" : : : "memory"); ___p1; }); }
# 59 "./include/asm-generic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void atomic_long_set(atomic_long_t *l, long i) { atomic64_t *v = (atomic64_t *)l; atomic64_set(v, i); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void atomic_long_set_release(atomic_long_t *l, long i) { atomic64_t *v = (atomic64_t *)l; do { do { extern void __compiletime_assert_60(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_60(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(v)->counter) __val; char __c[1]; } __u = { .__val = ( typeof(*&(v)->counter)) ((i)) }; __write_once_size(&(*&(v)->counter), __u.__c, sizeof(*&(v)->counter)); __u.__val; }); } while (0); }
# 72 "./include/asm-generic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_add_return(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_add_return(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_add_return_relaxed(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_add_return(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_add_return_acquire(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_add_return(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_add_return_release(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_add_return(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_sub_return(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_sub_return(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_sub_return_relaxed(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_sub_return(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_sub_return_acquire(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_sub_return(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_sub_return_release(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_sub_return(i, v); }
# 119 "./include/asm-generic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_long_inc(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_inc(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_long_dec(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_sub(1, (v));
}
# 142 "./include/asm-generic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_add(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_add(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_add_relaxed(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_add(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_add_acquire(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_add(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_add_release(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_add(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_sub(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_sub(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_sub(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_sub_acquire(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_sub(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_sub_release(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_sub(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_and(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_and(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_and_relaxed(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_and(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_and_acquire(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_and(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_and_release(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_and(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_andnot(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_and(~(long long)(i), (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_and(~(long long)(i), (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_and(~(long long)(i), (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_andnot_release(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_and(~(long long)(i), (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_or(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_or(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_or_relaxed(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_or(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_or_acquire(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_or(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_or_release(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_or(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_xor(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_xor(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_xor(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_xor_acquire(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_xor(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_xor_release(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_xor(i, v); }
# 178 "./include/asm-generic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_inc(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_add(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_inc_relaxed(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_add(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_inc_acquire(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_add(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_inc_release(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_add(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_dec(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_sub(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_dec_relaxed(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_sub(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_dec_acquire(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_sub(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_fetch_dec_release(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_fetch_sub(1, (v)); }
# 198 "./include/asm-generic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_long_add(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; atomic64_add(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_long_sub(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; atomic64_sub(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_long_and(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; atomic64_and(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_long_andnot(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; atomic64_and(~(long long)(i), (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_long_or(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; atomic64_or(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void atomic_long_xor(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; atomic64_xor(i, v); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_sub_and_test(i, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_dec_and_test(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_inc_and_test(v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_add_negative(i, v);
}
# 243 "./include/asm-generic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_inc_return(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_add_return(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_inc_return_relaxed(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_add_return(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_inc_return_acquire(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_add_return(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_inc_return_release(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_add_return(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_dec_return(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_sub_return(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_dec_return_relaxed(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_sub_return(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_dec_return_acquire(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_sub_return(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_dec_return_release(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; return (long)atomic64_sub_return(1, (v)); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_add_unless(v, a, u);
}
# 1316 "./include/linux/atomic.h" 2
# 39 "./include/linux/rcupdate.h" 2
# 1 "./include/linux/irqflags.h" 1
# 16 "./include/linux/irqflags.h"
# 1 "./arch/x86/include/asm/irqflags.h" 1
# 1 "./arch/x86/include/asm/processor-flags.h" 1
# 1 "./arch/x86/include/uapi/asm/processor-flags.h" 1
# 6 "./arch/x86/include/asm/processor-flags.h" 2
# 1 "./include/linux/mem_encrypt.h" 1
# 20 "./include/linux/mem_encrypt.h"
# 1 "./arch/x86/include/asm/mem_encrypt.h" 1
# 20 "./arch/x86/include/asm/mem_encrypt.h"
# 1 "./arch/x86/include/uapi/asm/bootparam.h" 1
# 36 "./arch/x86/include/uapi/asm/bootparam.h"
# 1 "./include/linux/screen_info.h" 1
# 1 "./include/uapi/linux/screen_info.h" 1
# 11 "./include/uapi/linux/screen_info.h"
struct screen_info {
__u8 orig_x;
__u8 orig_y;
__u16 ext_mem_k;
__u16 orig_video_page;
__u8 orig_video_mode;
__u8 orig_video_cols;
__u8 flags;
__u8 unused2;
__u16 orig_video_ega_bx;
__u16 unused3;
__u8 orig_video_lines;
__u8 orig_video_isVGA;
__u16 orig_video_points;
__u16 lfb_width;
__u16 lfb_height;
__u16 lfb_depth;
__u32 lfb_base;
__u32 lfb_size;
__u16 cl_magic, cl_offset;
__u16 lfb_linelength;
__u8 red_size;
__u8 red_pos;
__u8 green_size;
__u8 green_pos;
__u8 blue_size;
__u8 blue_pos;
__u8 rsvd_size;
__u8 rsvd_pos;
__u16 vesapm_seg;
__u16 vesapm_off;
__u16 pages;
__u16 vesa_attributes;
__u32 capabilities;
__u32 ext_lfb_base;
__u8 _reserved[2];
} __attribute__((packed));
# 6 "./include/linux/screen_info.h" 2
extern struct screen_info screen_info;
# 37 "./arch/x86/include/uapi/asm/bootparam.h" 2
# 1 "./include/linux/apm_bios.h" 1
# 18 "./include/linux/apm_bios.h"
# 1 "./include/uapi/linux/apm_bios.h" 1
# 22 "./include/uapi/linux/apm_bios.h"
typedef unsigned short apm_event_t;
typedef unsigned short apm_eventinfo_t;
struct apm_bios_info {
__u16 version;
__u16 cseg;
__u32 offset;
__u16 cseg_16;
__u16 dseg;
__u16 flags;
__u16 cseg_len;
__u16 cseg_16_len;
__u16 dseg_len;
};
# 133 "./include/uapi/linux/apm_bios.h"
# 1 "./include/uapi/linux/ioctl.h" 1
# 1 "./arch/x86/include/uapi/asm/ioctl.h" 1
# 1 "./include/asm-generic/ioctl.h" 1
# 1 "./include/uapi/asm-generic/ioctl.h" 1
# 6 "./include/asm-generic/ioctl.h" 2
extern unsigned int __invalid_size_argument_for_IOC;
# 2 "./arch/x86/include/uapi/asm/ioctl.h" 2
# 6 "./include/uapi/linux/ioctl.h" 2
# 134 "./include/uapi/linux/apm_bios.h" 2
# 19 "./include/linux/apm_bios.h" 2
# 35 "./include/linux/apm_bios.h"
struct apm_info {
struct apm_bios_info bios;
unsigned short connection_version;
int get_power_status_broken;
int get_power_status_swabinminutes;
int allow_ints;
int forbid_idle;
int realmode_power_off;
int disabled;
};
# 94 "./include/linux/apm_bios.h"
extern struct apm_info apm_info;
# 38 "./arch/x86/include/uapi/asm/bootparam.h" 2
# 1 "./include/linux/edd.h" 1
# 33 "./include/linux/edd.h"
# 1 "./include/uapi/linux/edd.h" 1
# 72 "./include/uapi/linux/edd.h"
struct edd_device_params {
__u16 length;
__u16 info_flags;
__u32 num_default_cylinders;
__u32 num_default_heads;
__u32 sectors_per_track;
__u64 number_of_sectors;
__u16 bytes_per_sector;
__u32 dpte_ptr;
__u16 key;
__u8 device_path_info_length;
__u8 reserved2;
__u16 reserved3;
__u8 host_bus_type[4];
__u8 interface_type[8];
union {
struct {
__u16 base_address;
__u16 reserved1;
__u32 reserved2;
} __attribute__ ((packed)) isa;
struct {
__u8 bus;
__u8 slot;
__u8 function;
__u8 channel;
__u32 reserved;
} __attribute__ ((packed)) pci;
struct {
__u64 reserved;
} __attribute__ ((packed)) ibnd;
struct {
__u64 reserved;
} __attribute__ ((packed)) xprs;
struct {
__u64 reserved;
} __attribute__ ((packed)) htpt;
struct {
__u64 reserved;
} __attribute__ ((packed)) unknown;
} interface_path;
union {
struct {
__u8 device;
__u8 reserved1;
__u16 reserved2;
__u32 reserved3;
__u64 reserved4;
} __attribute__ ((packed)) ata;
struct {
__u8 device;
__u8 lun;
__u8 reserved1;
__u8 reserved2;
__u32 reserved3;
__u64 reserved4;
} __attribute__ ((packed)) atapi;
struct {
__u16 id;
__u64 lun;
__u16 reserved1;
__u32 reserved2;
} __attribute__ ((packed)) scsi;
struct {
__u64 serial_number;
__u64 reserved;
} __attribute__ ((packed)) usb;
struct {
__u64 eui;
__u64 reserved;
} __attribute__ ((packed)) i1394;
struct {
__u64 wwid;
__u64 lun;
} __attribute__ ((packed)) fibre;
struct {
__u64 identity_tag;
__u64 reserved;
} __attribute__ ((packed)) i2o;
struct {
__u32 array_number;
__u32 reserved1;
__u64 reserved2;
} __attribute__ ((packed)) raid;
struct {
__u8 device;
__u8 reserved1;
__u16 reserved2;
__u32 reserved3;
__u64 reserved4;
} __attribute__ ((packed)) sata;
struct {
__u64 reserved1;
__u64 reserved2;
} __attribute__ ((packed)) unknown;
} device_path;
__u8 reserved4;
__u8 checksum;
} __attribute__ ((packed));
struct edd_info {
__u8 device;
__u8 version;
__u16 interface_support;
__u16 legacy_max_cylinder;
__u8 legacy_max_head;
__u8 legacy_sectors_per_track;
struct edd_device_params params;
} __attribute__ ((packed));
struct edd {
unsigned int mbr_signature[16];
struct edd_info edd_info[6];
unsigned char mbr_signature_nr;
unsigned char edd_info_nr;
};
# 34 "./include/linux/edd.h" 2
extern struct edd edd;
# 39 "./arch/x86/include/uapi/asm/bootparam.h" 2
# 1 "./arch/x86/include/asm/ist.h" 1
# 18 "./arch/x86/include/asm/ist.h"
# 1 "./arch/x86/include/uapi/asm/ist.h" 1
# 23 "./arch/x86/include/uapi/asm/ist.h"
struct ist_info {
__u32 signature;
__u32 command;
__u32 event;
__u32 perf_level;
};
# 19 "./arch/x86/include/asm/ist.h" 2
extern struct ist_info ist_info;
# 40 "./arch/x86/include/uapi/asm/bootparam.h" 2
# 1 "./include/video/edid.h" 1
# 1 "./include/uapi/video/edid.h" 1
struct edid_info {
unsigned char dummy[128];
};
# 6 "./include/video/edid.h" 2
extern struct edid_info edid_info;
# 41 "./arch/x86/include/uapi/asm/bootparam.h" 2
struct setup_data {
__u64 next;
__u32 type;
__u32 len;
__u8 data[0];
};
struct setup_header {
__u8 setup_sects;
__u16 root_flags;
__u32 syssize;
__u16 ram_size;
__u16 vid_mode;
__u16 root_dev;
__u16 boot_flag;
__u16 jump;
__u32 header;
__u16 version;
__u32 realmode_swtch;
__u16 start_sys_seg;
__u16 kernel_version;
__u8 type_of_loader;
__u8 loadflags;
__u16 setup_move_size;
__u32 code32_start;
__u32 ramdisk_image;
__u32 ramdisk_size;
__u32 bootsect_kludge;
__u16 heap_end_ptr;
__u8 ext_loader_ver;
__u8 ext_loader_type;
__u32 cmd_line_ptr;
__u32 initrd_addr_max;
__u32 kernel_alignment;
__u8 relocatable_kernel;
__u8 min_alignment;
__u16 xloadflags;
__u32 cmdline_size;
__u32 hardware_subarch;
__u64 hardware_subarch_data;
__u32 payload_offset;
__u32 payload_length;
__u64 setup_data;
__u64 pref_address;
__u32 init_size;
__u32 handover_offset;
} __attribute__((packed));
struct sys_desc_table {
__u16 length;
__u8 table[14];
};
struct olpc_ofw_header {
__u32 ofw_magic;
__u32 ofw_version;
__u32 cif_handler;
__u32 irq_desc_table;
} __attribute__((packed));
struct efi_info {
__u32 efi_loader_signature;
__u32 efi_systab;
__u32 efi_memdesc_size;
__u32 efi_memdesc_version;
__u32 efi_memmap;
__u32 efi_memmap_size;
__u32 efi_systab_hi;
__u32 efi_memmap_hi;
};
# 124 "./arch/x86/include/uapi/asm/bootparam.h"
struct boot_e820_entry {
__u64 addr;
__u64 size;
__u32 type;
} __attribute__((packed));
# 139 "./arch/x86/include/uapi/asm/bootparam.h"
struct jailhouse_setup_data {
__u16 version;
__u16 compatible_version;
__u16 pm_timer_address;
__u16 num_cpus;
__u64 pci_mmconfig_base;
__u32 tsc_khz;
__u32 apic_khz;
__u8 standard_ioapic;
__u8 cpu_ids[255];
} __attribute__((packed));
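/*
 * struct boot_params is the "zero page": the full parameter block the boot
 * loader hands to the kernel, embedding screen_info, apm_bios_info,
 * efi_info, the e820 table and struct setup_header.
 */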
struct boot_params {
struct screen_info screen_info;
struct apm_bios_info apm_bios_info;
__u8 _pad2[4];
__u64 tboot_addr;
struct ist_info ist_info;
__u64 acpi_rsdp_addr;
__u8 _pad3[8];
__u8 hd0_info[16];
__u8 hd1_info[16];
struct sys_desc_table sys_desc_table;
struct olpc_ofw_header olpc_ofw_header;
__u32 ext_ramdisk_image;
__u32 ext_ramdisk_size;
__u32 ext_cmd_line_ptr;
__u8 _pad4[116];
struct edid_info edid_info;
struct efi_info efi_info;
__u32 alt_mem_k;
__u32 scratch;
__u8 e820_entries;
__u8 eddbuf_entries;
__u8 edd_mbr_sig_buf_entries;
__u8 kbd_status;
__u8 secure_boot;
__u8 _pad5[2];
# 189 "./arch/x86/include/uapi/asm/bootparam.h"
__u8 sentinel;
__u8 _pad6[1];
struct setup_header hdr;
__u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
__u32 edd_mbr_sig_buffer[16];
struct boot_e820_entry e820_table[128];
__u8 _pad8[48];
struct edd_info eddbuf[6];
__u8 _pad9[276];
} __attribute__((packed));
# 239 "./arch/x86/include/uapi/asm/bootparam.h"
enum x86_hardware_subarch {
X86_SUBARCH_PC = 0,
X86_SUBARCH_LGUEST,
X86_SUBARCH_XEN,
X86_SUBARCH_INTEL_MID,
X86_SUBARCH_CE4100,
X86_NR_SUBARCHS,
};
# 21 "./arch/x86/include/asm/mem_encrypt.h" 2
# 62 "./arch/x86/include/asm/mem_encrypt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) sme_early_encrypt(resource_size_t paddr,
unsigned long size) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) sme_early_decrypt(resource_size_t paddr,
unsigned long size) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) sme_map_bootdata(char *real_mode_data) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) sme_unmap_bootdata(char *real_mode_data) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) sme_early_init(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) sme_encrypt_kernel(struct boot_params *bp) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) sme_enable(struct boot_params *bp) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sme_active(void) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sev_active(void) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__section__(".init.text"))) __attribute__((__cold__))
early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__section__(".init.text"))) __attribute__((__cold__))
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
# 96 "./arch/x86/include/asm/mem_encrypt.h"
extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
# 21 "./include/linux/mem_encrypt.h" 2
# 31 "./include/linux/mem_encrypt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mem_encrypt_active(void)
{
return 0ULL;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 sme_get_me_mask(void)
{
return 0ULL;
}
# 7 "./arch/x86/include/asm/processor-flags.h" 2
# 6 "./arch/x86/include/asm/irqflags.h" 2
# 17 "./arch/x86/include/asm/irqflags.h"
extern inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_save_fl(void);
extern inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_save_fl(void)
{
unsigned long flags;
asm volatile("# __raw_save_flags\n\t"
"pushf ; pop %0"
: "=rm" (flags)
:
: "memory");
return flags;
}
extern inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_restore_fl(unsigned long flags);
extern inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_restore_fl(unsigned long flags)
{
asm volatile("push %0 ; popf"
:
:"g" (flags)
:"memory", "cc");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_irq_disable(void)
{
asm volatile("cli": : :"memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_irq_enable(void)
{
asm volatile("sti": : :"memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__section__(".cpuidle.text"))) void native_safe_halt(void)
{
asm volatile("sti; hlt": : :"memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__section__(".cpuidle.text"))) void native_halt(void)
{
asm volatile("hlt": : :"memory");
}
# 73 "./arch/x86/include/asm/irqflags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) unsigned long arch_local_save_flags(void)
{
return native_save_fl();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void arch_local_irq_restore(unsigned long flags)
{
native_restore_fl(flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void arch_local_irq_disable(void)
{
native_irq_disable();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void arch_local_irq_enable(void)
{
native_irq_enable();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__section__(".cpuidle.text"))) void arch_safe_halt(void)
{
native_safe_halt();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__section__(".cpuidle.text"))) void halt(void)
{
native_halt();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) unsigned long arch_local_irq_save(void)
{
unsigned long flags = arch_local_save_flags();
arch_local_irq_disable();
return flags;
}
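/*
 * Editorial sketch (not part of the original preprocessed output): the usual
 * pairing of the irqflags helpers defined above for a short critical section.
 * The function name is illustrative only.
 */
static inline __attribute__((__unused__)) void example_irq_critical_section(void)
{
unsigned long flags;
flags = arch_local_irq_save(); /* save IF and disable interrupts on this CPU */
/* ... work that must not be interrupted ... */
arch_local_irq_restore(flags); /* restore the previous IF state */
}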
# 158 "./arch/x86/include/asm/irqflags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (((1UL)) << (9)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_irqs_disabled(void)
{
unsigned long flags = arch_local_save_flags();
return arch_irqs_disabled_flags(flags);
}
# 17 "./include/linux/irqflags.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_softirqs_on(unsigned long ip) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_softirqs_off(unsigned long ip) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_hardirqs_on(unsigned long ip) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_hardirqs_off(unsigned long ip) { }
# 40 "./include/linux/rcupdate.h" 2
# 1 "./include/linux/preempt.h" 1
# 78 "./include/linux/preempt.h"
# 1 "./arch/x86/include/asm/preempt.h" 1
# 1 "./include/linux/thread_info.h" 1
# 12 "./include/linux/thread_info.h"
# 1 "./include/linux/bug.h" 1
# 1 "./arch/x86/include/asm/bug.h" 1
# 83 "./arch/x86/include/asm/bug.h"
# 1 "./include/asm-generic/bug.h" 1
# 23 "./include/asm-generic/bug.h"
struct bug_entry {
signed int bug_addr_disp;
signed int file_disp;
unsigned short line;
unsigned short flags;
};
# 106 "./include/asm-generic/bug.h"
extern __attribute__((__format__(printf, 1, 2))) void __warn_printk(const char *fmt, ...);
struct warn_args;
struct pt_regs;
void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args);
# 84 "./arch/x86/include/asm/bug.h" 2
# 6 "./include/linux/bug.h" 2
enum bug_trap_type {
BUG_TRAP_TYPE_NONE = 0,
BUG_TRAP_TYPE_WARN = 1,
BUG_TRAP_TYPE_BUG = 2,
};
struct pt_regs;
# 34 "./include/linux/bug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_warning_bug(const struct bug_entry *bug)
{
return bug->flags & (1 << 0);
}
struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
int is_valid_bugaddr(unsigned long addr);
void generic_bug_clear_once(void);
# 65 "./include/linux/bug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) bool check_data_corruption(bool v) { return v; }
# 13 "./include/linux/thread_info.h" 2
# 1 "./include/linux/restart_block.h" 1
# 10 "./include/linux/restart_block.h"
# 1 "./include/linux/time64.h" 1
# 1 "./include/linux/math64.h" 1
# 1 "./arch/x86/include/asm/div64.h" 1
# 75 "./arch/x86/include/asm/div64.h"
# 1 "./include/asm-generic/div64.h" 1
# 76 "./arch/x86/include/asm/div64.h" 2
# 7 "./include/linux/math64.h" 2
# 24 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
# 38 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
# 52 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
# 65 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 div64_u64(u64 dividend, u64 divisor)
{
return dividend / divisor;
}
# 77 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 div64_s64(s64 dividend, s64 divisor)
{
return dividend / divisor;
}
# 123 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
# 136 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 div_s64(s64 dividend, s32 divisor)
{
s32 remainder;
return div_s64_rem(dividend, divisor, &remainder);
}
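/*
 * Editorial sketch (not in the original output): typical use of div_u64_rem()
 * from above to split a nanosecond count into whole seconds plus a remainder.
 * The helper name is illustrative only.
 */
static inline __attribute__((__unused__)) u64 example_ns_to_secs(u64 ns, u32 *rem_ns)
{
return div_u64_rem(ns, 1000000000U, rem_ns);
}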
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
u32 ret = 0;
while (dividend >= divisor) {
asm("" : "+rm"(dividend));
dividend -= divisor;
ret++;
}
*remainder = dividend;
return ret;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 mul_u32_u32(u32 a, u32 b)
{
return (u64)a * b;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
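/*
 * Editorial note (not in the original output): on x86-64 the two mul-and-shift
 * helpers above use the compiler's unsigned __int128 type, so the full 128-bit
 * product is formed before the right shift rather than being emulated in C.
 */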
# 256 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
union {
u64 ll;
struct {
u32 low, high;
} l;
} u, rl, rh;
u.ll = a;
rl.ll = mul_u32_u32(u.l.low, mul);
rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
rl.l.high = ({ uint32_t __base = (divisor); uint32_t __rem; __rem = ((uint64_t)(rh.ll)) % __base; (rh.ll) = ((uint64_t)(rh.ll)) / __base; __rem; });
({ uint32_t __base = (divisor); uint32_t __rem; __rem = ((uint64_t)(rl.ll)) % __base; (rl.ll) = ((uint64_t)(rl.ll)) / __base; __rem; });
rl.l.high = rh.l.low;
return rl.ll;
}
# 6 "./include/linux/time64.h" 2
typedef __s64 time64_t;
typedef __u64 timeu64_t;
# 18 "./include/linux/time64.h"
# 1 "./include/uapi/linux/time.h" 1
# 10 "./include/uapi/linux/time.h"
struct timespec {
__kernel_time_t tv_sec;
long tv_nsec;
};
struct timeval {
__kernel_time_t tv_sec;
__kernel_suseconds_t tv_usec;
};
struct timezone {
int tz_minuteswest;
int tz_dsttime;
};
# 35 "./include/uapi/linux/time.h"
struct itimerspec {
struct timespec it_interval;
struct timespec it_value;
};
struct itimerval {
struct timeval it_interval;
struct timeval it_value;
};
# 66 "./include/uapi/linux/time.h"
struct __kernel_old_timeval {
__kernel_long_t tv_sec;
__kernel_long_t tv_usec;
};
# 19 "./include/linux/time64.h" 2
struct timespec64 {
time64_t tv_sec;
long tv_nsec;
};
struct itimerspec64 {
struct timespec64 it_interval;
struct timespec64 it_value;
};
# 44 "./include/linux/time64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
if (lhs->tv_sec > rhs->tv_sec)
return 1;
return lhs->tv_nsec - rhs->tv_nsec;
}
extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct timespec64 timespec64_add(struct timespec64 lhs,
struct timespec64 rhs)
{
struct timespec64 ts_delta;
set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
lhs.tv_nsec + rhs.tv_nsec);
return ts_delta;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct timespec64 timespec64_sub(struct timespec64 lhs,
struct timespec64 rhs)
{
struct timespec64 ts_delta;
set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
lhs.tv_nsec - rhs.tv_nsec);
return ts_delta;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timespec64_valid(const struct timespec64 *ts)
{
if (ts->tv_sec < 0)
return false;
if ((unsigned long)ts->tv_nsec >= 1000000000L)
return false;
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timespec64_valid_strict(const struct timespec64 *ts)
{
if (!timespec64_valid(ts))
return false;
if ((unsigned long long)ts->tv_sec >= (((s64)~((u64)1 << 63)) / 1000000000L))
return false;
return true;
}
# 118 "./include/linux/time64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 timespec64_to_ns(const struct timespec64 *ts)
{
return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec;
}
extern struct timespec64 ns_to_timespec64(const s64 nsec);
# 139 "./include/linux/time64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void timespec64_add_ns(struct timespec64 *a, u64 ns)
{
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
a->tv_nsec = ns;
}
extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
const struct timespec64 rhs);
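/*
 * Editorial sketch (not in the original output): timespec64_add_ns() from
 * above renormalizes tv_nsec back into [0, 10^9) after the addition.
 * The helper name is illustrative only.
 */
static inline __attribute__((__unused__)) struct timespec64 example_bump_timespec(struct timespec64 ts)
{
timespec64_add_ns(&ts, 1500000000ULL); /* add 1.5 s; excess nanoseconds carry into tv_sec */
return ts;
}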
# 11 "./include/linux/restart_block.h" 2
struct timespec;
struct old_timespec32;
struct pollfd;
enum timespec_type {
TT_NONE = 0,
TT_NATIVE = 1,
TT_COMPAT = 2,
};
struct restart_block {
long (*fn)(struct restart_block *);
union {
struct {
u32 *uaddr;
u32 val;
u32 flags;
u32 bitset;
u64 time;
u32 *uaddr2;
} futex;
struct {
clockid_t clockid;
enum timespec_type type;
union {
struct timespec *rmtp;
struct old_timespec32 *compat_rmtp;
};
u64 expires;
} nanosleep;
struct {
struct pollfd *ufds;
int nfds;
int has_timeout;
unsigned long tv_sec;
unsigned long tv_nsec;
} poll;
};
};
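/*
 * Editorial note (not in the original output): struct restart_block records,
 * per task, how an interrupted syscall (futex wait, nanosleep, poll) should be
 * restarted after signal handling, via the fn callback and the matching union
 * member.
 */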
extern long do_no_restart_syscall(struct restart_block *parm);
# 14 "./include/linux/thread_info.h" 2
# 31 "./include/linux/thread_info.h"
enum {
BAD_STACK = -1,
NOT_STACK = 0,
GOOD_FRAME,
GOOD_STACK,
};
# 1 "./arch/x86/include/asm/thread_info.h" 1
# 12 "./arch/x86/include/asm/thread_info.h"
# 1 "./arch/x86/include/asm/page.h" 1
# 1 "./arch/x86/include/asm/page_types.h" 1
# 48 "./arch/x86/include/asm/page_types.h"
# 1 "./arch/x86/include/asm/page_64_types.h" 1
# 1 "./arch/x86/include/asm/kaslr.h" 1
unsigned long kaslr_get_random_long(const char *purpose);
void kernel_randomize_memory(void);
# 7 "./arch/x86/include/asm/page_64_types.h" 2
# 49 "./arch/x86/include/asm/page_types.h" 2
# 64 "./arch/x86/include/asm/page_types.h"
extern int devmem_is_allowed(unsigned long pagenr);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) phys_addr_t get_max_mapped(void)
{
return (phys_addr_t)max_pfn_mapped << 12;
}
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(void);
# 10 "./arch/x86/include/asm/page.h" 2
# 1 "./arch/x86/include/asm/page_64.h" 1
# 11 "./arch/x86/include/asm/page_64.h"
extern unsigned long max_pfn;
extern unsigned long phys_base;
extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __phys_addr_nodebug(unsigned long x)
{
unsigned long y = x - (0xffffffff80000000UL);
x = y + ((x > y) ? phys_base : ((0xffffffff80000000UL) - ((unsigned long)page_offset_base)));
return x;
}
# 43 "./arch/x86/include/asm/page_64.h"
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_page(void *page)
{
asm volatile ("661:\n\t" "call %P[old]" "\n662:\n" ".skip -((" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")) > 0) * " "(" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")), 0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 3*32+16)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" " .long 661b - .\n" " .long " "664""2""f - .\n" " .word " "( 9*32+ 9)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""2""f-""664""2""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" "call %P[new1]" "\n" "665""1" ":\n\t" "664""2"":\n\t" "call %P[new2]" "\n" "665""2" ":\n\t" ".popsection\n" : "=D" (page), "+r" (current_stack_pointer) : [old] "i" (clear_page_orig), [new1] "i" (clear_page_rep), [new2] "i" (clear_page_erms), "0" (page) : "cc", "memory", "rax", "rcx");
}
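/*
 * Editorial note (not in the original output): the asm block above is the
 * expanded ALTERNATIVE_2() form of clear_page() -- it defaults to calling
 * clear_page_orig and is patched at boot to call clear_page_rep or
 * clear_page_erms when the corresponding CPU features (REP_GOOD, ERMS) are
 * present.
 */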
void copy_page(void *to, void *from);
# 13 "./arch/x86/include/asm/page.h" 2
struct page;
# 1 "./include/linux/range.h" 1
struct range {
u64 start;
u64 end;
};
int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);
int add_range_with_merge(struct range *range, int az, int nr_range,
u64 start, u64 end);
void subtract_range(struct range *range, int az, u64 start, u64 end);
int clean_sort_range(struct range *range, int az);
void sort_range(struct range *range, int nr_range);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) resource_size_t cap_resource(u64 val)
{
if (val > ((resource_size_t)~0))
return ((resource_size_t)~0);
return val;
}
# 22 "./arch/x86/include/asm/page.h" 2
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_user_page(void *page, unsigned long vaddr,
struct page *pg)
{
clear_page(page);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *topage)
{
copy_page(to, from);
}
# 71 "./arch/x86/include/asm/page.h"
extern bool __virt_addr_valid(unsigned long kaddr);
# 1 "./include/asm-generic/memory_model.h" 1
# 1 "./include/linux/pfn.h" 1
# 13 "./include/linux/pfn.h"
typedef struct {
u64 val;
} pfn_t;
# 6 "./include/asm-generic/memory_model.h" 2
# 77 "./arch/x86/include/asm/page.h" 2
# 1 "./include/asm-generic/getorder.h" 1
# 13 "./include/asm-generic/getorder.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__))
int __get_order(unsigned long size)
{
int order;
size--;
size >>= 12;
order = fls64(size);
return order;
}
# 78 "./arch/x86/include/asm/page.h" 2
# 13 "./arch/x86/include/asm/thread_info.h" 2
# 52 "./arch/x86/include/asm/thread_info.h"
struct task_struct;
# 1 "./arch/x86/include/asm/cpufeature.h" 1
# 1 "./arch/x86/include/asm/processor.h" 1
struct task_struct;
struct mm_struct;
struct vm86;
# 1 "./arch/x86/include/asm/math_emu.h" 1
# 1 "./arch/x86/include/asm/ptrace.h" 1
# 1 "./arch/x86/include/asm/segment.h" 1
# 237 "./arch/x86/include/asm/segment.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
{
return (node << 12) | cpu;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vdso_read_cpunode(unsigned *cpu, unsigned *node)
{
unsigned int p;
# 254 "./arch/x86/include/asm/segment.h"
asm volatile ("661:\n\t" "lsl %[seg],%[p]" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "(16*32+22)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" ".byte 0xf3,0x0f,0xc7,0xf8" "\n" "665""1" ":\n\t" ".popsection\n" : [p] "=a" (p) : "i" (0), [seg] "r" ((15*8 + 3)));
if (cpu)
*cpu = (p & 0xfff);
if (node)
*node = (p >> 12);
}
# 289 "./arch/x86/include/asm/segment.h"
extern const char early_idt_handler_array[32][9];
extern void early_ignore_irq(void);
# 335 "./arch/x86/include/asm/segment.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __loadsegment_fs(unsigned short value)
{
asm volatile(" \n"
"1: movw %0, %%fs \n"
"2: \n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long (" "ex_handler_clear_fs" ") - .\n" " .popsection\n"
: : "rm" (value) : "memory");
}
# 6 "./arch/x86/include/asm/ptrace.h" 2
# 1 "./arch/x86/include/uapi/asm/ptrace.h" 1
# 1 "./arch/x86/include/uapi/asm/ptrace-abi.h" 1
# 7 "./arch/x86/include/uapi/asm/ptrace.h" 2
# 8 "./arch/x86/include/asm/ptrace.h" 2
# 56 "./arch/x86/include/asm/ptrace.h"
struct pt_regs {
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long bp;
unsigned long bx;
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long ax;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
unsigned long orig_ax;
unsigned long ip;
unsigned long cs;
unsigned long flags;
unsigned long sp;
unsigned long ss;
};
struct cpuinfo_x86;
struct task_struct;
extern unsigned long profile_pc(struct pt_regs *regs);
extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->ax = rc;
}
# 128 "./arch/x86/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int user_mode(struct pt_regs *regs)
{
return !!(regs->cs & 3);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int v8086_mode(struct pt_regs *regs)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool user_64bit_mode(struct pt_regs *regs)
{
return regs->cs == (6*8 + 3);
}
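/*
 * Editorial sketch (not in the original output): distinguishing a user-mode
 * trap from a kernel-mode one using the saved register frame, as the
 * page-fault code in this translation unit does.  Function name is
 * illustrative only.
 */
static inline __attribute__((__unused__)) int example_trap_from_user(struct pt_regs *regs)
{
return user_mode(regs); /* non-zero when CS.RPL == 3, i.e. the fault came from user space */
}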
# 172 "./arch/x86/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
# 1 "./include/asm-generic/ptrace.h" 1
# 22 "./include/asm-generic/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long instruction_pointer(struct pt_regs *regs)
{
return ((regs)->ip);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
(((regs)->ip) = (val));
}
# 44 "./include/asm-generic/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long user_stack_pointer(struct pt_regs *regs)
{
return ((regs)->sp);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void user_stack_pointer_set(struct pt_regs *regs,
unsigned long val)
{
(((regs)->sp) = (val));
}
# 62 "./include/asm-generic/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long frame_pointer(struct pt_regs *regs)
{
return ((regs)->bp);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void frame_pointer_set(struct pt_regs *regs,
unsigned long val)
{
(((regs)->bp) = (val));
}
# 183 "./arch/x86/include/asm/ptrace.h" 2
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
# 198 "./arch/x86/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (__builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs, ss))), 0))
return 0;
# 223 "./arch/x86/include/asm/ptrace.h"
return *(unsigned long *)((unsigned long)regs + offset);
}
# 234 "./arch/x86/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int regs_within_kernel_stack(struct pt_regs *regs,
unsigned long addr)
{
return ((addr & ~((((1UL) << 12) << (2 + 0)) - 1)) ==
(kernel_stack_pointer(regs) & ~((((1UL) << 12) << (2 + 0)) - 1)));
}
# 250 "./arch/x86/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return addr;
else
return ((void *)0);
}
extern long probe_kernel_read(void *dst, const void *src, size_t size);
# 273 "./arch/x86/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
unsigned long *addr;
unsigned long val;
long ret;
addr = regs_get_kernel_stack_nth_addr(regs, n);
if (addr) {
ret = probe_kernel_read(&val, addr, sizeof(val));
if (!ret)
return val;
}
return 0;
}
# 300 "./arch/x86/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long regs_get_kernel_argument(struct pt_regs *regs,
unsigned int n)
{
static const unsigned int argument_offs[] = {
__builtin_offsetof(struct pt_regs, di),
__builtin_offsetof(struct pt_regs, si),
__builtin_offsetof(struct pt_regs, dx),
__builtin_offsetof(struct pt_regs, cx),
__builtin_offsetof(struct pt_regs, r8),
__builtin_offsetof(struct pt_regs, r9),
};
if (n >= 6) {
n -= 6 - 1;
return regs_get_kernel_stack_nth(regs, n);
} else
return regs_get_register(regs, argument_offs[n]);
}
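/*
 * Editorial sketch (not in the original output): fetching the first argument
 * of a probed kernel function from its saved registers; on x86-64 arguments
 * 0..5 live in di, si, dx, cx, r8, r9 per the offset table above.  The
 * function name is illustrative only.
 */
static inline __attribute__((__unused__)) unsigned long example_first_argument(struct pt_regs *regs)
{
return regs_get_kernel_argument(regs, 0); /* argument 0 maps to regs->di */
}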
# 352 "./arch/x86/include/asm/ptrace.h"
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc *info, int can_allocate);
# 6 "./arch/x86/include/asm/math_emu.h" 2
struct math_emu_info {
long ___orig_eip;
struct pt_regs *regs;
};
# 13 "./arch/x86/include/asm/processor.h" 2
# 1 "./arch/x86/include/uapi/asm/sigcontext.h" 1
# 40 "./arch/x86/include/uapi/asm/sigcontext.h"
struct _fpx_sw_bytes {
__u32 magic1;
# 54 "./arch/x86/include/uapi/asm/sigcontext.h"
__u32 extended_size;
__u64 xfeatures;
__u32 xstate_size;
__u32 padding[7];
};
# 85 "./arch/x86/include/uapi/asm/sigcontext.h"
struct _fpreg {
__u16 significand[4];
__u16 exponent;
};
struct _fpxreg {
__u16 significand[4];
__u16 exponent;
__u16 padding[3];
};
struct _xmmreg {
__u32 element[4];
};
struct _fpstate_32 {
__u32 cw;
__u32 sw;
__u32 tag;
__u32 ipoff;
__u32 cssel;
__u32 dataoff;
__u32 datasel;
struct _fpreg _st[8];
__u16 status;
__u16 magic;
__u32 _fxsr_env[6];
__u32 mxcsr;
__u32 reserved;
struct _fpxreg _fxsr_st[8];
struct _xmmreg _xmm[8];
union {
__u32 padding1[44];
__u32 padding[44];
};
union {
__u32 padding2[12];
struct _fpx_sw_bytes sw_reserved;
};
};
# 149 "./arch/x86/include/uapi/asm/sigcontext.h"
struct _fpstate_64 {
__u16 cwd;
__u16 swd;
__u16 twd;
__u16 fop;
__u64 rip;
__u64 rdp;
__u32 mxcsr;
__u32 mxcsr_mask;
__u32 st_space[32];
__u32 xmm_space[64];
__u32 reserved2[12];
union {
__u32 reserved3[12];
struct _fpx_sw_bytes sw_reserved;
};
};
struct _header {
__u64 xfeatures;
__u64 reserved1[2];
__u64 reserved2[5];
};
struct _ymmh_state {
__u32 ymmh_space[64];
};
# 192 "./arch/x86/include/uapi/asm/sigcontext.h"
struct _xstate {
struct _fpstate_64 fpstate;
struct _header xstate_hdr;
struct _ymmh_state ymmh;
};
struct sigcontext_32 {
__u16 gs, __gsh;
__u16 fs, __fsh;
__u16 es, __esh;
__u16 ds, __dsh;
__u32 di;
__u32 si;
__u32 bp;
__u32 sp;
__u32 bx;
__u32 dx;
__u32 cx;
__u32 ax;
__u32 trapno;
__u32 err;
__u32 ip;
__u16 cs, __csh;
__u32 flags;
__u32 sp_at_signal;
__u16 ss, __ssh;
# 230 "./arch/x86/include/uapi/asm/sigcontext.h"
__u32 fpstate;
__u32 oldmask;
__u32 cr2;
};
struct sigcontext_64 {
__u64 r8;
__u64 r9;
__u64 r10;
__u64 r11;
__u64 r12;
__u64 r13;
__u64 r14;
__u64 r15;
__u64 di;
__u64 si;
__u64 bp;
__u64 bx;
__u64 dx;
__u64 ax;
__u64 cx;
__u64 sp;
__u64 ip;
__u64 flags;
__u16 cs;
__u16 gs;
__u16 fs;
__u16 ss;
__u64 err;
__u64 trapno;
__u64 oldmask;
__u64 cr2;
# 273 "./arch/x86/include/uapi/asm/sigcontext.h"
__u64 fpstate;
__u64 reserved1[8];
};
# 16 "./arch/x86/include/asm/processor.h" 2
# 1 "./arch/x86/include/asm/pgtable_types.h" 1
# 139 "./arch/x86/include/asm/pgtable_types.h"
enum page_cache_mode {
_PAGE_CACHE_MODE_WB = 0,
_PAGE_CACHE_MODE_WC = 1,
_PAGE_CACHE_MODE_UC_MINUS = 2,
_PAGE_CACHE_MODE_UC = 3,
_PAGE_CACHE_MODE_WT = 4,
_PAGE_CACHE_MODE_WP = 5,
_PAGE_CACHE_MODE_NUM = 8
};
# 250 "./arch/x86/include/asm/pgtable_types.h"
# 1 "./arch/x86/include/asm/pgtable_64_types.h" 1
# 1 "./arch/x86/include/asm/sparsemem.h" 1
# 6 "./arch/x86/include/asm/pgtable_64_types.h" 2
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long p4dval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef struct { pteval_t pte; } pte_t;
# 43 "./arch/x86/include/asm/pgtable_64_types.h"
extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;
# 251 "./arch/x86/include/asm/pgtable_types.h" 2
# 266 "./arch/x86/include/asm/pgtable_types.h"
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef struct { pgdval_t pgd; } pgd_t;
# 293 "./arch/x86/include/asm/pgtable_types.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t native_make_pgd(pgdval_t val)
{
return (pgd_t) { val & (~0ULL) };
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgdval_t native_pgd_val(pgd_t pgd)
{
return pgd.pgd & (~0ULL);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgdval_t pgd_flags(pgd_t pgd)
{
return native_pgd_val(pgd) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1)))));
}
# 321 "./arch/x86/include/asm/pgtable_types.h"
# 1 "./include/asm-generic/pgtable-nop4d.h" 1
typedef struct { pgd_t pgd; } p4d_t;
# 22 "./include/asm-generic/pgtable-nop4d.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_none(pgd_t pgd) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_bad(pgd_t pgd) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_present(pgd_t pgd) { return 1; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pgd_clear(pgd_t *pgd) { }
# 36 "./include/asm-generic/pgtable-nop4d.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
return (p4d_t *)pgd;
}
# 322 "./arch/x86/include/asm/pgtable_types.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t native_make_p4d(pudval_t val)
{
return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t native_p4d_val(p4d_t p4d)
{
return native_pgd_val(p4d.pgd);
}
typedef struct { pudval_t pud; } pud_t;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t native_make_pud(pmdval_t val)
{
return (pud_t) { val };
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t native_pud_val(pud_t pud)
{
return pud.pud;
}
# 361 "./arch/x86/include/asm/pgtable_types.h"
typedef struct { pmdval_t pmd; } pmd_t;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t native_make_pmd(pmdval_t val)
{
return (pmd_t) { val };
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmdval_t native_pmd_val(pmd_t pmd)
{
return pmd.pmd;
}
# 386 "./arch/x86/include/asm/pgtable_types.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t p4d_pfn_mask(p4d_t p4d)
{
return ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t p4d_flags_mask(p4d_t p4d)
{
return ~p4d_pfn_mask(p4d);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t p4d_flags(p4d_t p4d)
{
return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t pud_pfn_mask(pud_t pud)
{
if (native_pud_val(pud) & (((pteval_t)(1)) << 7))
return (((signed long)(~(((1UL) << 30)-1))) & ((phys_addr_t)((1ULL << 52) - 1)));
else
return ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t pud_flags_mask(pud_t pud)
{
return ~pud_pfn_mask(pud);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t pud_flags(pud_t pud)
{
return native_pud_val(pud) & pud_flags_mask(pud);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmdval_t pmd_pfn_mask(pmd_t pmd)
{
if (native_pmd_val(pmd) & (((pteval_t)(1)) << 7))
return (((signed long)(~(((1UL) << 21)-1))) & ((phys_addr_t)((1ULL << 52) - 1)));
else
return ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmdval_t pmd_flags_mask(pmd_t pmd)
{
return ~pmd_pfn_mask(pmd);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmdval_t pmd_flags(pmd_t pmd)
{
return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t native_make_pte(pteval_t val)
{
return (pte_t) { .pte = val };
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pteval_t native_pte_val(pte_t pte)
{
return pte.pte;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pteval_t pte_flags(pte_t pte)
{
return native_pte_val(pte) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1)))));
}
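/*
 * Editorial note (not in the original output): the *_pfn_mask()/*_flags()
 * helpers above split a page-table entry into its physical-frame bits and its
 * flag bits; for PUD/PMD entries the frame mask widens when the PSE bit
 * (bit 7) marks a 1 GiB or 2 MiB huge mapping.
 */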
extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];
# 468 "./arch/x86/include/asm/pgtable_types.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long cachemode2protval(enum page_cache_mode pcm)
{
if (__builtin_expect(!!(pcm == 0), 1))
return 0;
return __cachemode2pte_tbl[pcm];
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
return ((pgprot_t) { (cachemode2protval(pcm)) } );
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
unsigned long masked;
masked = ((pgprot).pgprot) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3));
if (__builtin_expect(!!(masked == 0), 1))
return 0;
return __pte2cachemode_tbl[((((masked) >> (7 - 2)) & 4) | (((masked) >> (4 - 1)) & 2) | (((masked) >> 3) & 1))];
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
pgprotval_t val = ((pgprot).pgprot);
pgprot_t new;
((new).pgprot) = (val & ~((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 12))) |
((val & (((pteval_t)(1)) << 7)) << (12 - 7));
return new;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
pgprotval_t val = ((pgprot).pgprot);
pgprot_t new;
((new).pgprot) = (val & ~((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 12))) |
((val & (((pteval_t)(1)) << 12)) >>
(12 - 7));
return new;
}
typedef struct page *pgtable_t;
extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;
extern pgprot_t pgprot_writecombine(pgprot_t prot);
extern pgprot_t pgprot_writethrough(pgprot_t prot);
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
void set_pte_vaddr(unsigned long vaddr, pte_t pte);
struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);
enum pg_level {
PG_LEVEL_NONE,
PG_LEVEL_4K,
PG_LEVEL_2M,
PG_LEVEL_1G,
PG_LEVEL_512G,
PG_LEVEL_NUM
};
extern void update_page_count(int level, unsigned long pages);
# 562 "./arch/x86/include/asm/pgtable_types.h"
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
unsigned long address,
unsigned numpages,
unsigned long page_flags);
extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
unsigned long numpages);
# 20 "./arch/x86/include/asm/processor.h" 2
# 1 "./arch/x86/include/asm/msr.h" 1
# 1 "./arch/x86/include/asm/msr-index.h" 1
# 6 "./arch/x86/include/asm/msr.h" 2
# 1 "./arch/x86/include/uapi/asm/errno.h" 1
# 1 "./include/uapi/asm-generic/errno.h" 1
# 1 "./include/uapi/asm-generic/errno-base.h" 1
# 6 "./include/uapi/asm-generic/errno.h" 2
# 2 "./arch/x86/include/uapi/asm/errno.h" 2
# 11 "./arch/x86/include/asm/msr.h" 2
# 1 "./arch/x86/include/asm/cpumask.h" 1
# 1 "./include/linux/cpumask.h" 1
# 12 "./include/linux/cpumask.h"
# 1 "./include/linux/bitmap.h" 1
# 1 "./include/linux/string.h" 1
# 10 "./include/linux/string.h"
# 1 "./include/uapi/linux/string.h" 1
# 11 "./include/linux/string.h" 2
extern char *strndup_user(const char *, long);
extern void *memdup_user(const void *, size_t);
extern void *vmemdup_user(const void *, size_t);
extern void *memdup_user_nul(const void *, size_t);
# 1 "./arch/x86/include/asm/string.h" 1
# 1 "./arch/x86/include/asm/string_64.h" 1
# 1 "./include/linux/jump_label.h" 1
# 79 "./include/linux/jump_label.h"
extern bool static_key_initialized;
struct static_key {
atomic_t enabled;
# 102 "./include/linux/jump_label.h"
union {
unsigned long type;
struct jump_entry *entries;
struct static_key_mod *next;
};
};
# 117 "./include/linux/jump_label.h"
# 1 "./arch/x86/include/asm/jump_label.h" 1
# 118 "./include/linux/jump_label.h" 2
struct jump_entry {
s32 code;
s32 target;
long key;
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long jump_entry_code(const struct jump_entry *entry)
{
return (unsigned long)&entry->code + entry->code;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long jump_entry_target(const struct jump_entry *entry)
{
return (unsigned long)&entry->target + entry->target;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct static_key *jump_entry_key(const struct jump_entry *entry)
{
long offset = entry->key & ~3L;
return (struct static_key *)((unsigned long)&entry->key + offset);
}
# 164 "./include/linux/jump_label.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool jump_entry_is_branch(const struct jump_entry *entry)
{
return (unsigned long)entry->key & 1UL;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool jump_entry_is_init(const struct jump_entry *entry)
{
return (unsigned long)entry->key & 2UL;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void jump_entry_set_init(struct jump_entry *entry)
{
entry->key |= 2;
}
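/*
 * Editorial note (not in the original output): jump table entries store
 * code/target/key as relative offsets, and the low two bits of "key" are
 * reused as the branch-default and init flags decoded by the helpers above.
 */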
enum jump_label_type {
JUMP_LABEL_NOP = 0,
JUMP_LABEL_JMP,
};
struct module;
# 204 "./include/linux/jump_label.h"
extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
extern void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type);
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
# 339 "./include/linux/jump_label.h"
struct static_key_true {
struct static_key key;
};
struct static_key_false {
struct static_key key;
};
# 378 "./include/linux/jump_label.h"
extern bool ____wrong_branch_error(void);
# 7 "./arch/x86/include/asm/string_64.h" 2
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
# 33 "./arch/x86/include/asm/string_64.h"
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *memset16(uint16_t *s, uint16_t v, size_t n)
{
long d0, d1;
asm volatile("rep\n\t"
"stosw"
: "=&c" (d0), "=&D" (d1)
: "a" (v), "1" (s), "0" (n)
: "memory");
return s;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *memset32(uint32_t *s, uint32_t v, size_t n)
{
long d0, d1;
asm volatile("rep\n\t"
"stosl"
: "=&c" (d0), "=&D" (d1)
: "a" (v), "1" (s), "0" (n)
: "memory");
return s;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *memset64(uint64_t *s, uint64_t v, size_t n)
{
long d0, d1;
asm volatile("rep\n\t"
"stosq"
: "=&c" (d0), "=&D" (d1)
: "a" (v), "1" (s), "0" (n)
: "memory");
return s;
}
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);
int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);
# 101 "./arch/x86/include/asm/string_64.h"
__attribute__((__warn_unused_result__)) unsigned long __memcpy_mcsafe(void *dst, const void *src,
size_t cnt);
extern struct static_key_false mcsafe_key;
# 120 "./arch/x86/include/asm/string_64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&mcsafe_key), struct static_key_true)) branch = ({ __label__ l_yes; __label__ l_done; bool ret; asm goto("1:" ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t" "2:\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (&(&mcsafe_key)->key), "i" (false) : : l_yes); ret = false; goto l_done; l_yes: ret = true; l_done: ret; }); else if (__builtin_types_compatible_p(typeof(*&mcsafe_key), struct static_key_false)) branch = ({ __label__ l_yes; __label__ l_done; bool ret; asm goto("1:" ".byte " "0x0f,0x1f,0x44,0x00,0" "\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (&(&mcsafe_key)->key), "i" (false) : : l_yes); ret = false; goto l_done; l_yes: ret = true; l_done: ret; }); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }))
return __memcpy_mcsafe(dst, src, cnt);
else
({ size_t __len = (cnt); void *__ret; if (__builtin_constant_p(cnt) && __len >= 64) __ret = __memcpy((dst), (src), __len); else __ret = __builtin_memcpy((dst), (src), __len); __ret; });
return 0;
}
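/*
 * Editorial note (not in the original output): the condition above is the
 * expanded static-branch test on mcsafe_key -- when the key is enabled at
 * boot (machine-check recovery available) the copy goes through
 * __memcpy_mcsafe(), otherwise it degenerates to an ordinary memcpy.
 */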
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
if (__builtin_constant_p(cnt)) {
switch (cnt) {
case 4:
asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
return;
case 8:
asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
return;
case 16:
asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
return;
}
}
__memcpy_flushcache(dst, src, cnt);
}
# 6 "./arch/x86/include/asm/string.h" 2
# 21 "./include/linux/string.h" 2
extern char * strcpy(char *,const char *);
extern char * strncpy(char *,const char *, __kernel_size_t);
size_t strlcpy(char *, const char *, size_t);
ssize_t strscpy(char *, const char *, size_t);
extern char * strcat(char *, const char *);
extern char * strncat(char *, const char *, __kernel_size_t);
extern size_t strlcat(char *, const char *, __kernel_size_t);
extern int strcmp(const char *,const char *);
extern int strncmp(const char *,const char *,__kernel_size_t);
extern int strcasecmp(const char *s1, const char *s2);
extern int strncasecmp(const char *s1, const char *s2, size_t n);
extern char * strchr(const char *,int);
extern char * strchrnul(const char *,int);
extern char * strnchr(const char *, size_t, int);
extern char * strrchr(const char *,int);
extern char * __attribute__((__warn_unused_result__)) skip_spaces(const char *);
extern char *strim(char *);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) char *strstrip(char *str)
{
return strim(str);
}
extern char * strstr(const char *, const char *);
extern char * strnstr(const char *, const char *, size_t);
extern __kernel_size_t strlen(const char *);
extern __kernel_size_t strnlen(const char *,__kernel_size_t);
extern char * strpbrk(const char *,const char *);
extern char * strsep(char **,const char *);
extern __kernel_size_t strspn(const char *,const char *);
extern __kernel_size_t strcspn(const char *,const char *);
# 117 "./include/linux/string.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *memset_l(unsigned long *p, unsigned long v,
__kernel_size_t n)
{
if (64 == 32)
return memset32((uint32_t *)p, v, n);
else
return memset64((uint64_t *)p, v, n);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *memset_p(void **p, void *v, __kernel_size_t n)
{
if (64 == 32)
return memset32((uint32_t *)p, (uintptr_t)v, n);
else
return memset64((uint64_t *)p, (uintptr_t)v, n);
}
extern void **__memcat_p(void **a, void **b);
# 148 "./include/linux/string.h"
extern void * memscan(void *,int,__kernel_size_t);
extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
# 170 "./include/linux/string.h"
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp) __attribute__((__malloc__));
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
extern int kstrtobool(const char *s, bool *res);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int strtobool(const char *s, bool *res)
{
return kstrtobool(s, res);
}
int match_string(const char * const *array, size_t n, const char *string);
int __sysfs_match_string(const char * const *array, size_t n, const char *s);
# 204 "./include/linux/string.h"
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __attribute__((__format__(printf, 3, 4)));
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool strstarts(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}
size_t memweight(const void *ptr, size_t bytes);
void memzero_explicit(void *s, size_t count);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *kbasename(const char *path)
{
const char *tail = strrchr(path, '/');
return tail ? tail + 1 : path;
}
void fortify_panic(const char *name) __attribute__((__noreturn__)) __attribute__((__cold__));
void __read_overflow(void) ;
void __read_overflow2(void) ;
void __read_overflow3(void) ;
void __write_overflow(void) ;
# 449 "./include/linux/string.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memcpy_and_pad(void *dest, size_t dest_len,
const void *src, size_t count, int pad)
{
if (dest_len > count) {
({ size_t __len = (count); void *__ret; if (__builtin_constant_p(count) && __len >= 64) __ret = __memcpy((dest), (src), __len); else __ret = __builtin_memcpy((dest), (src), __len); __ret; });
memset(dest + count, pad, dest_len - count);
} else
({ size_t __len = (dest_len); void *__ret; if (__builtin_constant_p(dest_len) && __len >= 64) __ret = __memcpy((dest), (src), __len); else __ret = __builtin_memcpy((dest), (src), __len); __ret; });
}
# 473 "./include/linux/string.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) size_t str_has_prefix(const char *str, const char *prefix)
{
size_t len = strlen(prefix);
return strncmp(str, prefix, len) == 0 ? len : 0;
}
# 10 "./include/linux/bitmap.h" 2
# 111 "./include/linux/bitmap.h"
extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
extern void bitmap_free(const unsigned long *bitmap);
extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits);
extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
extern void __bitmap_set(unsigned long *map, unsigned int start, int len);
extern void __bitmap_clear(unsigned long *map, unsigned int start, int len);
extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask,
unsigned long align_offset);
# 164 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long
bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
}
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern int bitmap_parselist_user(const char *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
# 215 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = (((nbits) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(unsigned long);
memset(dst, 0, len);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = (((nbits) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
unsigned int len = (((nbits) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(unsigned long);
({ size_t __len = (len); void *__ret; if (__builtin_constant_p(len) && __len >= 64) __ret = __memcpy((dst), (src), __len); else __ret = __builtin_memcpy((dst), (src), __len); __ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_copy_clear_tail(unsigned long *dst,
const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % 64)
dst[nbits / 64] &= (~0UL >> (-(nbits) & (64 - 1)));
}
extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
unsigned int nbits);
extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
unsigned int nbits);
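/*
 * Usage sketch, not part of the original translation unit: the helpers
 * above size their memset()/memcpy() in whole unsigned longs
 * (BITS_TO_LONGS(nbits) words), so trailing bits of the last word may be
 * touched as well:
 *
 *     unsigned long map[1];      // enough for up to 64 bits on x86_64
 *     bitmap_zero(map, 40);      // clears the whole 64-bit word
 *     bitmap_fill(map, 40);      // sets the whole word, bits 40..63 included
 */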
# 263 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return (*dst = *src1 & *src2 & (~0UL >> (-(nbits) & (64 - 1)))) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = *src1 | *src2;
else
__bitmap_or(dst, src1, src2, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = *src1 ^ *src2;
else
__bitmap_xor(dst, src1, src2, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return (*dst = *src1 & ~(*src2) & (~0UL >> (-(nbits) & (64 - 1)))) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = ~(*src);
else
__bitmap_complement(dst, src, nbits);
}
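/*
 * Note, not part of the original translation unit: the repeated
 * (__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0) guard is
 * the expanded small_const_nbits() check. When nbits is a compile-time
 * constant that fits in one word, each operation folds to a single
 * word-sized AND/OR/XOR masked with the last-word mask, with no call to
 * the out-of-line __bitmap_*() routine. For example:
 *
 *     unsigned long dst, a = 0x06, b = 0x05;
 *     bitmap_and(&dst, &a, &b, 8);    // dst == 0x04, returns 1 (non-empty)
 */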
# 313 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return !((*src1 ^ *src2) & (~0UL >> (-(nbits) & (64 - 1))));
if (__builtin_constant_p(nbits & (8 - 1)) &&
(((nbits) & ((typeof(nbits))(8) - 1)) == 0))
return !memcmp(src1, src2, nbits / 8);
return __bitmap_equal(src1, src2, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return ((*src1 & *src2) & (~0UL >> (-(nbits) & (64 - 1)))) != 0;
else
return __bitmap_intersects(src1, src2, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return ! ((*src1 & ~(*src2)) & (~0UL >> (-(nbits) & (64 - 1))));
else
return __bitmap_subset(src1, src2, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bitmap_empty(const unsigned long *src, unsigned nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return ! (*src & (~0UL >> (-(nbits) & (64 - 1))));
return find_first_bit(src, nbits) == nbits;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bitmap_full(const unsigned long *src, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return ! (~(*src) & (~0UL >> (-(nbits) & (64 - 1))));
return find_first_zero_bit(src, nbits) == nbits;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return hweight_long(*src & (~0UL >> (-(nbits) & (64 - 1))));
return __bitmap_weight(src, nbits);
}
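/*
 * Usage sketch, not part of the original translation unit: with a small
 * constant nbits, bitmap_weight() becomes hweight_long() on the masked
 * word and bitmap_empty()/bitmap_full() become a single compare:
 *
 *     unsigned long w = 0xF0;
 *     int n = bitmap_weight(&w, 8);    // n == 4
 *     bool e = bitmap_empty(&w, 4);    // true: bits 0..3 are clear
 */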
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void bitmap_set(unsigned long *map, unsigned int start,
unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
else if (__builtin_constant_p(start & (8 - 1)) &&
(((start) & ((typeof(start))(8) - 1)) == 0) &&
__builtin_constant_p(nbits & (8 - 1)) &&
(((nbits) & ((typeof(nbits))(8) - 1)) == 0))
memset((char *)map + start / 8, 0xff, nbits / 8);
else
__bitmap_set(map, start, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void bitmap_clear(unsigned long *map, unsigned int start,
unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
else if (__builtin_constant_p(start & (8 - 1)) &&
(((start) & ((typeof(start))(8) - 1)) == 0) &&
__builtin_constant_p(nbits & (8 - 1)) &&
(((nbits) & ((typeof(nbits))(8) - 1)) == 0))
memset((char *)map + start / 8, 0, nbits / 8);
else
__bitmap_clear(map, start, nbits);
}
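/*
 * Usage sketch, not part of the original translation unit: bitmap_set()
 * and bitmap_clear() pick the cheapest code the compiler can prove safe:
 * a single __set_bit()/__clear_bit() for one bit, a memset() for
 * byte-aligned runs, and the out-of-line __bitmap_set()/__bitmap_clear()
 * otherwise:
 *
 *     unsigned long map[1] = { 0 };
 *     bitmap_set(map, 0, 8);      // byte aligned: memset(map, 0xff, 1)
 *     bitmap_clear(map, 3, 1);    // single bit: __clear_bit(3, map)
 */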
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = (*src & (~0UL >> (-(nbits) & (64 - 1)))) >> shift;
else
__bitmap_shift_right(dst, src, shift, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = (*src << shift) & (~0UL >> (-(nbits) & (64 - 1)));
else
__bitmap_shift_left(dst, src, shift, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bitmap_parse(const char *buf, unsigned int buflen,
unsigned long *maskp, int nmaskbits)
{
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
# 460 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bitmap_from_u64(unsigned long *dst, u64 mask)
{
dst[0] = mask & (~0UL);
if (sizeof(mask) > sizeof(unsigned long))
dst[1] = mask >> 32;
}
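/*
 * Note, not part of the original translation unit: on x86_64
 * sizeof(u64) == sizeof(unsigned long), so the dst[1] branch above is
 * compiled away and the whole 64-bit mask lands in dst[0]:
 *
 *     unsigned long dst[1];
 *     bitmap_from_u64(dst, 0xdeadbeefULL);    // dst[0] == 0xdeadbeef
 */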
# 13 "./include/linux/cpumask.h" 2
typedef struct cpumask { unsigned long bits[(((64) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; } cpumask_t;
# 38 "./include/linux/cpumask.h"
extern unsigned int nr_cpu_ids;
# 89 "./include/linux/cpumask.h"
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
# 118 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_check(unsigned int cpu)
{
cpu_max_bits_warn(cpu, ((unsigned int)64));
return cpu;
}
# 196 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_first_bit(((srcp)->bits), ((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_last(const struct cpumask *srcp)
{
return find_last_bit(((srcp)->bits), ((unsigned int)64));
}
unsigned int cpumask_next(int n, const struct cpumask *srcp);
# 221 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
if (n != -1)
cpumask_check(n);
return find_next_zero_bit(((srcp)->bits), ((unsigned int)64), n+1);
}
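/*
 * Usage sketch, not part of the original translation unit: the
 * ((unsigned int)64) literals are the expansion of nr_cpumask_bits for a
 * NR_CPUS=64 build, so iteration is just find_first_bit()/find_next_bit()
 * over a single word. do_something() is hypothetical; __cpu_online_mask is
 * declared further up.
 *
 *     unsigned int cpu;
 *     for (cpu = cpumask_first(&__cpu_online_mask);
 *          cpu < nr_cpu_ids;
 *          cpu = cpumask_next(cpu, &__cpu_online_mask))
 *             do_something(cpu);
 */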
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
# 257 "./include/linux/cpumask.h"
extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
# 309 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), ((dstp)->bits));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), ((dstp)->bits));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), ((dstp)->bits));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
__clear_bit(cpumask_check(cpu), ((dstp)->bits));
}
# 342 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return (__builtin_constant_p((cpumask_check(cpu))) ? constant_test_bit((cpumask_check(cpu)), ((((cpumask))->bits))) : variable_test_bit((cpumask_check(cpu)), ((((cpumask))->bits))));
}
# 356 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits));
}
# 370 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), ((cpumask)->bits));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_setall(struct cpumask *dstp)
{
bitmap_fill(((dstp)->bits), ((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(((dstp)->bits), ((unsigned int)64));
}
# 401 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_and(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), ((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_or(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), ((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_xor(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_xor(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), ((unsigned int)64));
}
# 444 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_andnot(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), ((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_complement(struct cpumask *dstp,
const struct cpumask *srcp)
{
bitmap_complement(((dstp)->bits), ((srcp)->bits),
((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_equal(((src1p)->bits), ((src2p)->bits),
((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_intersects(((src1p)->bits), ((src2p)->bits),
((unsigned int)64));
}
# 495 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_subset(((src1p)->bits), ((src2p)->bits),
((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(((srcp)->bits), ((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(((srcp)->bits), ((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(((srcp)->bits), ((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_shift_right(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n,
((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_shift_left(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n,
((unsigned int)64));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_copy(struct cpumask *dstp,
const struct cpumask *srcp)
{
bitmap_copy(((dstp)->bits), ((srcp)->bits), ((unsigned int)64));
}
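/*
 * Usage sketch, not part of the original translation unit: each cpumask_*
 * wrapper above simply forwards to the corresponding bitmap_* helper with
 * nbits fixed at 64, so whole-mask set operations are one-word operations
 * in this configuration:
 *
 *     struct cpumask tmp;
 *     cpumask_and(&tmp, &__cpu_online_mask, &__cpu_present_mask);
 *     unsigned int n = cpumask_weight(&tmp);   // CPUs both online and present
 */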
# 606 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_parse_user(const char *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, ((dstp)->bits), ((unsigned int)64));
}
# 620 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_parselist_user(const char *buf, int len,
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, ((dstp)->bits),
((unsigned int)64));
}
# 634 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_parse(const char *buf, struct cpumask *dstp)
{
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
return bitmap_parse(buf, len, ((dstp)->bits), ((unsigned int)64));
}
# 649 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, ((dstp)->bits), ((unsigned int)64));
}
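/*
 * Usage sketch, not part of the original translation unit: cpumask_parse()
 * takes a hexadecimal mask string ("f" means CPUs 0-3), while
 * cpulist_parse() takes a range list ("0-3,8" means CPUs 0,1,2,3 and 8).
 * Both return 0 on success. pr_debug() comes from printk.h, which is not
 * shown in this excerpt:
 *
 *     struct cpumask mask;
 *     if (!cpulist_parse("0-3,8", &mask))
 *             pr_debug("%u cpus requested\n", cpumask_weight(&mask));
 */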
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_size(void)
{
return (((((unsigned int)64)) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(long);
}
# 722 "./include/linux/cpumask.h"
typedef struct cpumask cpumask_var_t[1];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void free_cpumask_var(cpumask_var_t mask)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_available(cpumask_var_t mask)
{
return true;
}
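/*
 * Note, not part of the original translation unit: these stubs correspond
 * to CONFIG_CPUMASK_OFFSTACK being disabled, where cpumask_var_t is the
 * one-element array typedef'd above. alloc_cpumask_var() and friends never
 * allocate anything and always succeed; the zalloc_* variants only clear
 * the caller-provided mask, and free_cpumask_var() is a no-op.
 */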
extern const unsigned long cpu_all_bits[(((64) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];
# 782 "./include/linux/cpumask.h"
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void reset_cpu_possible_mask(void)
{
bitmap_zero(((&__cpu_possible_mask)->bits), 64);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
set_cpu_possible(unsigned int cpu, bool possible)
{
if (possible)
cpumask_set_cpu(cpu, &__cpu_possible_mask);
else
cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
set_cpu_present(unsigned int cpu, bool present)
{
if (present)
cpumask_set_cpu(cpu, &__cpu_present_mask);
else
cpumask_clear_cpu(cpu, &__cpu_present_mask);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
set_cpu_online(unsigned int cpu, bool online)
{
if (online)
cpumask_set_cpu(cpu, &__cpu_online_mask);
else
cpumask_clear_cpu(cpu, &__cpu_online_mask);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
set_cpu_active(unsigned int cpu, bool active)
{
if (active)
cpumask_set_cpu(cpu, &__cpu_active_mask);
else
cpumask_clear_cpu(cpu, &__cpu_active_mask);
}
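/*
 * Usage sketch, not part of the original translation unit: the CPU hotplug
 * code flips these global masks as CPUs come and go; marking CPU 1 online
 * is just a set_bit() on __cpu_online_mask:
 *
 *     set_cpu_online(1, true);     // cpumask_set_cpu(1, &__cpu_online_mask)
 */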
# 842 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
# 854 "./include/linux/cpumask.h"
extern const unsigned long
cpu_bit_bitmap[64 +1][(((64) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % 64];
p -= cpu / 64;
return ((struct cpumask *)(1 ? (p) : (void *)sizeof(__check_is_bitmap(p))));
}
# 891 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, ((mask)->bits),
nr_cpu_ids);
}
# 6 "./arch/x86/include/asm/cpumask.h" 2
extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;
extern void setup_cpu_local_masks(void);
# 12 "./arch/x86/include/asm/msr.h" 2
# 1 "./arch/x86/include/uapi/asm/msr.h" 1
# 13 "./arch/x86/include/asm/msr.h" 2
struct msr {
union {
struct {
u32 l;
u32 h;
};
u64 q;
};
};
struct msr_info {
u32 msr_no;
struct msr reg;
struct msr *msrs;
int err;
};
struct msr_regs_info {
u32 *regs;
int err;
};
struct saved_msr {
bool valid;
struct msr_info info;
};
struct saved_msrs {
unsigned int num;
struct saved_msr *array;
};
# 68 "./arch/x86/include/asm/msr.h"
# 1 "./include/linux/tracepoint-defs.h" 1
# 12 "./include/linux/tracepoint-defs.h"
# 1 "./include/linux/static_key.h" 1
# 13 "./include/linux/tracepoint-defs.h" 2
struct trace_print_flags {
unsigned long mask;
const char *name;
};
struct trace_print_flags_u64 {
unsigned long long mask;
const char *name;
};
struct tracepoint_func {
void *func;
void *data;
int prio;
};
struct tracepoint {
const char *name;
struct static_key key;
int (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func *funcs;
};
typedef const int tracepoint_ptr_t;
struct bpf_raw_event_map {
struct tracepoint *tp;
void *bpf_func;
u32 num_args;
} __attribute__((__aligned__(32)));
# 69 "./arch/x86/include/asm/msr.h" 2
extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
# 91 "./arch/x86/include/asm/msr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long __attribute__((no_instrument_function)) __rdmsr(unsigned int msr)
{
unsigned long low, high;
asm volatile("1: rdmsr\n"
"2:\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long (" "ex_handler_rdmsr_unsafe" ") - .\n" " .popsection\n"
: "=a" (low), "=d" (high) : "c" (msr));
return ((low) | (high) << 32);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((no_instrument_function)) __wrmsr(unsigned int msr, u32 low, u32 high)
{
asm volatile("1: wrmsr\n"
"2:\n"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long (" "ex_handler_wrmsr_unsafe" ") - .\n" " .popsection\n"
: : "c" (msr), "a"(low), "d" (high) : "memory");
}
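/*
 * Note, not part of the original translation unit: the
 * .pushsection "__ex_table" blocks are the expanded exception-table
 * entries (the _ASM_EXTABLE_HANDLE macro). They record the address of the
 * rdmsr/wrmsr instruction plus a fixup handler, so a #GP fault on an
 * unknown MSR is routed to ex_handler_rdmsr_unsafe/ex_handler_wrmsr_unsafe
 * instead of oopsing; the *_safe variants further below use
 * ex_handler_default and report the failure to the caller.
 */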
# 125 "./arch/x86/include/asm/msr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long native_read_msr(unsigned int msr)
{
unsigned long long val;
val = __rdmsr(msr);
if (({ ({ __label__ l_yes; __label__ l_done; bool ret; asm goto("1:" ".byte " "0x0f,0x1f,0x44,0x00,0" "\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (&(__tracepoint_read_msr).key), "i" (false) : : l_yes); ret = false; goto l_done; l_yes: ret = true; l_done: ret; }); }))
do_trace_read_msr(msr, val, 0);
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long native_read_msr_safe(unsigned int msr,
int *err)
{
unsigned long low, high;
asm volatile("2: rdmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
"3: mov %[fault],%[err]\n\t"
"xorl %%eax, %%eax\n\t"
"xorl %%edx, %%edx\n\t"
"jmp 1b\n\t"
".previous\n\t"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "2b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default" ") - .\n" " .popsection\n"
: [err] "=r" (*err), "=a" (low), "=d" (high)
: "c" (msr), [fault] "i" (-5));
if (({ ({ __label__ l_yes; __label__ l_done; bool ret; asm goto("1:" ".byte " "0x0f,0x1f,0x44,0x00,0" "\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (&(__tracepoint_read_msr).key), "i" (false) : : l_yes); ret = false; goto l_done; l_yes: ret = true; l_done: ret; }); }))
do_trace_read_msr(msr, ((low) | (high) << 32), *err);
return ((low) | (high) << 32);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((no_instrument_function))
native_write_msr(unsigned int msr, u32 low, u32 high)
{
__wrmsr(msr, low, high);
if (({ ({ __label__ l_yes; __label__ l_done; bool ret; asm goto("1:" ".byte " "0x0f,0x1f,0x44,0x00,0" "\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (&(__tracepoint_write_msr).key), "i" (false) : : l_yes); ret = false; goto l_done; l_yes: ret = true; l_done: ret; }); }))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((no_instrument_function))
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
int err;
asm volatile("2: wrmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
"3: mov %[fault],%[err] ; jmp 1b\n\t"
".previous\n\t"
" .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "2b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default" ") - .\n" " .popsection\n"
: [err] "=a" (err)
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-5)
: "memory");
if (({ ({ __label__ l_yes; __label__ l_done; bool ret; asm goto("1:" ".byte " "0x0f,0x1f,0x44,0x00,0" "\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (&(__tracepoint_write_msr).key), "i" (false) : : l_yes); ret = false; goto l_done; l_yes: ret = true; l_done: ret; }); }))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
# 201 "./arch/x86/include/asm/msr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long long rdtsc(void)
{
unsigned long low, high;
asm volatile("rdtsc" : "=a" (low), "=d" (high));
return ((low) | (high) << 32);
}
# 218 "./arch/x86/include/asm/msr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long long rdtsc_ordered(void)
{
# 231 "./arch/x86/include/asm/msr.h"
asm volatile("661:\n\t" "" "\n662:\n" ".skip -((" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")) > 0) * " "(" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")), 0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 3*32+17)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" " .long 661b - .\n" " .long " "664""2""f - .\n" " .word " "( 3*32+18)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""2""f-""664""2""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" "mfence" "\n" "665""1" ":\n\t" "664""2"":\n\t" "lfence" "\n" "665""2" ":\n\t" ".popsection\n" ::: "memory");
return rdtsc();
}
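/*
 * Usage sketch, not part of the original translation unit: the alternatives
 * block above inserts an MFENCE or LFENCE (selected by CPU feature bits) in
 * front of RDTSC so the timestamp read cannot be hoisted above earlier
 * loads. A minimal cycle measurement, with do_work() hypothetical:
 *
 *     u64 t0 = rdtsc_ordered();
 *     do_work();
 *     u64 cycles = rdtsc_ordered() - t0;
 */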
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long native_read_pmc(int counter)
{
unsigned long low, high;
asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
if (({ ({ __label__ l_yes; __label__ l_done; bool ret; asm goto("1:" ".byte " "0x0f,0x1f,0x44,0x00,0" "\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (&(__tracepoint_rdpmc).key), "i" (false) : : l_yes); ret = false; goto l_done; l_yes: ret = true; l_done: ret; }); }))
do_trace_rdpmc(counter, ((low) | (high) << 32), 0);
return ((low) | (high) << 32);
}
# 1 "./include/linux/errno.h" 1
# 1 "./include/uapi/linux/errno.h" 1
# 1 "./arch/x86/include/uapi/asm/errno.h" 1
# 2 "./include/uapi/linux/errno.h" 2
# 6 "./include/linux/errno.h" 2
# 249 "./arch/x86/include/asm/msr.h" 2
# 262 "./arch/x86/include/asm/msr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wrmsr(unsigned int msr, u32 low, u32 high)
{
native_write_msr(msr, low, high);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wrmsrl(unsigned int msr, u64 val)
{
native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
return native_write_msr_safe(msr, low, high);
}
# 291 "./arch/x86/include/asm/msr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
int err;
*p = native_read_msr_safe(msr, &err);
return err;
}
# 313 "./arch/x86/include/asm/msr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int wrmsrl_safe(u32 msr, u64 val)
{
return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
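/*
 * Usage sketch, not part of the original translation unit: the low/high
 * split mirrors the hardware EDX:EAX interface; most callers use the
 * 64-bit wrappers. 0xC0000080 is MSR_EFER from msr-index.h, used here only
 * as an example of a real MSR:
 *
 *     u64 efer;
 *     if (!rdmsrl_safe(0xC0000080, &efer))    // returns 0 on success
 *             wrmsrl(0xC0000080, efer);       // write the value back unchanged
 */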
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
# 22 "./arch/x86/include/asm/processor.h" 2
# 1 "./arch/x86/include/asm/desc_defs.h" 1
# 16 "./arch/x86/include/asm/desc_defs.h"
struct desc_struct {
u16 limit0;
u16 base0;
u16 base1: 8, type: 4, s: 1, dpl: 2, p: 1;
u16 limit1: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
} __attribute__((packed));
# 40 "./arch/x86/include/asm/desc_defs.h"
enum {
GATE_INTERRUPT = 0xE,
GATE_TRAP = 0xF,
GATE_CALL = 0xC,
GATE_TASK = 0x5,
};
enum {
DESC_TSS = 0x9,
DESC_LDT = 0x2,
DESCTYPE_S = 0x10,
};
struct ldttss_desc {
u16 limit0;
u16 base0;
u16 base1 : 8, type : 5, dpl : 2, p : 1;
u16 limit1 : 4, zero0 : 3, g : 1, base2 : 8;
u32 base3;
u32 zero1;
} __attribute__((packed));
typedef struct ldttss_desc ldt_desc;
typedef struct ldttss_desc tss_desc;
struct idt_bits {
u16 ist : 3,
zero : 5,
type : 5,
dpl : 2,
p : 1;
} __attribute__((packed));
struct gate_struct {
u16 offset_low;
u16 segment;
struct idt_bits bits;
u16 offset_middle;
u32 offset_high;
u32 reserved;
} __attribute__((packed));
typedef struct gate_struct gate_desc;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long gate_offset(const gate_desc *g)
{
return g->offset_low | ((unsigned long)g->offset_middle << 16) |
((unsigned long) g->offset_high << 32);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long gate_segment(const gate_desc *g)
{
return g->segment;
}
struct desc_ptr {
unsigned short size;
unsigned long address;
} __attribute__((packed));
# 23 "./arch/x86/include/asm/processor.h" 2
# 1 "./arch/x86/include/asm/special_insns.h" 1
# 17 "./arch/x86/include/asm/special_insns.h"
extern unsigned long __force_order;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_read_cr0(void)
{
unsigned long val;
asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_write_cr0(unsigned long val)
{
asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_read_cr2(void)
{
unsigned long val;
asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_write_cr2(unsigned long val)
{
asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __native_read_cr3(void)
{
unsigned long val;
asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_write_cr3(unsigned long val)
{
asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_read_cr4(void)
{
unsigned long val;
# 70 "./arch/x86/include/asm/special_insns.h"
asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_write_cr4(unsigned long val)
{
asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_read_cr8(void)
{
unsigned long cr8;
asm volatile("movq %%cr8,%0" : "=r" (cr8));
return cr8;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_write_cr8(unsigned long val)
{
asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __read_pkru(void)
{
u32 ecx = 0;
u32 edx, pkru;
asm volatile(".byte 0x0f,0x01,0xee\n\t"
: "=a" (pkru), "=d" (edx)
: "c" (ecx));
return pkru;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __write_pkru(u32 pkru)
{
u32 ecx = 0, edx = 0;
asm volatile(".byte 0x0f,0x01,0xef\n\t"
: : "a" (pkru), "c"(ecx), "d"(edx));
}
# 132 "./arch/x86/include/asm/special_insns.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
}
extern void native_load_gs_index(unsigned);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __read_cr4(void)
{
return native_read_cr4();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long read_cr0(void)
{
return native_read_cr0();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_cr0(unsigned long x)
{
native_write_cr0(x);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long read_cr2(void)
{
return native_read_cr2();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_cr2(unsigned long x)
{
native_write_cr2(x);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __read_cr3(void)
{
return __native_read_cr3();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_cr3(unsigned long x)
{
native_write_cr3(x);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __write_cr4(unsigned long x)
{
native_write_cr4(x);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wbinvd(void)
{
native_wbinvd();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long read_cr8(void)
{
return native_read_cr8();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_cr8(unsigned long x)
{
native_write_cr8(x);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void load_gs_index(unsigned selector)
{
native_load_gs_index(selector);
}
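/*
 * Note, not part of the original translation unit: these thin wrappers map
 * straight onto the native_* accessors, which is what a non-paravirt build
 * uses. The page-fault code in this translation unit relies on read_cr2():
 * on x86 the CR2 register holds the linear address that caused the fault,
 * so the handler starts with roughly
 *
 *     unsigned long address = read_cr2();
 */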
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clflushopt(volatile void *__p)
{
asm volatile ("661:\n\t" ".byte " "0x3e" "; clflush %P0" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+23)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" ".byte 0x66; clflush %P0" "\n" "665""1" ":\n\t" ".popsection\n" : "+m" (*(volatile char *)__p) : "i" (0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clwb(volatile void *__p)
{
volatile struct { char x[64]; } *p = __p;
asm volatile("661:\n\t" ".byte " "0x3e" "; clflush (%[pax])" "\n662:\n" ".skip -((" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")) > 0) * " "(" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")), 0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+23)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" " .long 661b - .\n" " .long " "664""2""f - .\n" " .word " "( 9*32+24)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""2""f-""664""2""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" ".byte 0x66; clflush (%[pax])" "\n" "665""1" ":\n\t" "664""2"":\n\t" ".byte 0x66, 0x0f, 0xae, 0x30" "\n" "665""2" ":\n\t" ".popsection\n"
: [p] "+m" (*p)
: [pax] "a" (p));
}
# 25 "./arch/x86/include/asm/processor.h" 2
# 1 "./arch/x86/include/asm/fpu/types.h" 1
# 12 "./arch/x86/include/asm/fpu/types.h"
struct fregs_state {
u32 cwd;
u32 swd;
u32 twd;
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
u32 st_space[20];
u32 status;
};
struct fxregs_state {
u16 cwd;
u16 swd;
u16 twd;
u16 fop;
union {
struct {
u64 rip;
u64 rdp;
};
struct {
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
};
};
u32 mxcsr;
u32 mxcsr_mask;
u32 st_space[32];
u32 xmm_space[64];
u32 padding[12];
union {
u32 padding1[12];
u32 sw_reserved[12];
};
} __attribute__((aligned(16)));
# 79 "./arch/x86/include/asm/fpu/types.h"
struct swregs_state {
u32 cwd;
u32 swd;
u32 twd;
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
u32 st_space[20];
u8 ftop;
u8 changed;
u8 lookahead;
u8 no_update;
u8 rm;
u8 alimit;
struct math_emu_info *info;
u32 entry_eip;
};
enum xfeature {
XFEATURE_FP,
XFEATURE_SSE,
XFEATURE_YMM,
XFEATURE_BNDREGS,
XFEATURE_BNDCSR,
XFEATURE_OPMASK,
XFEATURE_ZMM_Hi256,
XFEATURE_Hi16_ZMM,
XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
XFEATURE_PKRU,
XFEATURE_MAX,
};
# 139 "./arch/x86/include/asm/fpu/types.h"
struct reg_128_bit {
u8 regbytes[128/8];
};
struct reg_256_bit {
u8 regbytes[256/8];
};
struct reg_512_bit {
u8 regbytes[512/8];
};
# 159 "./arch/x86/include/asm/fpu/types.h"
struct ymmh_struct {
struct reg_128_bit hi_ymm[16];
} __attribute__((__packed__));
struct mpx_bndreg {
u64 lower_bound;
u64 upper_bound;
} __attribute__((__packed__));
struct mpx_bndreg_state {
struct mpx_bndreg bndreg[4];
} __attribute__((__packed__));
struct mpx_bndcsr {
u64 bndcfgu;
u64 bndstatus;
} __attribute__((__packed__));
struct mpx_bndcsr_state {
union {
struct mpx_bndcsr bndcsr;
u8 pad_to_64_bytes[64];
};
} __attribute__((__packed__));
struct avx_512_opmask_state {
u64 opmask_reg[8];
} __attribute__((__packed__));
struct avx_512_zmm_uppers_state {
struct reg_256_bit zmm_upper[16];
} __attribute__((__packed__));
struct avx_512_hi16_state {
struct reg_512_bit hi16_zmm[16];
} __attribute__((__packed__));
struct pkru_state {
u32 pkru;
u32 pad;
} __attribute__((__packed__));
struct xstate_header {
u64 xfeatures;
u64 xcomp_bv;
u64 reserved[6];
} __attribute__((packed));
# 253 "./arch/x86/include/asm/fpu/types.h"
struct xregs_state {
struct fxregs_state i387;
struct xstate_header header;
u8 extended_state_area[0];
} __attribute__ ((packed, aligned (64)));
# 268 "./arch/x86/include/asm/fpu/types.h"
union fpregs_state {
struct fregs_state fsave;
struct fxregs_state fxsave;
struct swregs_state soft;
struct xregs_state xsave;
u8 __padding[((1UL) << 12)];
};
struct fpu {
# 294 "./arch/x86/include/asm/fpu/types.h"
unsigned int last_cpu;
# 303 "./arch/x86/include/asm/fpu/types.h"
unsigned char initialized;
# 314 "./arch/x86/include/asm/fpu/types.h"
union fpregs_state state;
};
# 26 "./arch/x86/include/asm/processor.h" 2
# 1 "./arch/x86/include/asm/unwind_hints.h" 1
# 1 "./arch/x86/include/asm/orc_types.h" 1
# 85 "./arch/x86/include/asm/orc_types.h"
struct orc_entry {
s16 sp_offset;
s16 bp_offset;
unsigned sp_reg:4;
unsigned bp_reg:4;
unsigned type:2;
unsigned end:1;
} __attribute__((__packed__));
struct unwind_hint {
u32 ip;
s16 sp_offset;
u8 sp_reg;
u8 type;
u8 end;
};
# 5 "./arch/x86/include/asm/unwind_hints.h" 2
# 27 "./arch/x86/include/asm/processor.h" 2
# 1 "./include/linux/personality.h" 1
# 1 "./include/uapi/linux/personality.h" 1
# 11 "./include/uapi/linux/personality.h"
enum {
UNAME26 = 0x0020000,
ADDR_NO_RANDOMIZE = 0x0040000,
FDPIC_FUNCPTRS = 0x0080000,
MMAP_PAGE_ZERO = 0x0100000,
ADDR_COMPAT_LAYOUT = 0x0200000,
READ_IMPLIES_EXEC = 0x0400000,
ADDR_LIMIT_32BIT = 0x0800000,
SHORT_INODE = 0x1000000,
WHOLE_SECONDS = 0x2000000,
STICKY_TIMEOUTS = 0x4000000,
ADDR_LIMIT_3GB = 0x8000000,
};
# 42 "./include/uapi/linux/personality.h"
enum {
PER_LINUX = 0x0000,
PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
WHOLE_SECONDS | SHORT_INODE,
PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
PER_BSD = 0x0006,
PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
PER_LINUX32 = 0x0008,
PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,
PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,
PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,
PER_RISCOS = 0x000c,
PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
PER_OSF4 = 0x000f,
PER_HPUX = 0x0010,
PER_MASK = 0x00ff,
};
# 6 "./include/linux/personality.h" 2
# 29 "./arch/x86/include/asm/processor.h" 2
# 1 "./include/linux/err.h" 1
# 1 "./arch/x86/include/uapi/asm/errno.h" 1
# 9 "./include/linux/err.h" 2
# 24 "./include/linux/err.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void * __attribute__((__warn_unused_result__)) ERR_PTR(long error)
{
return (void *) error;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long __attribute__((__warn_unused_result__)) PTR_ERR( const void *ptr)
{
return (long) ptr;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) IS_ERR( const void *ptr)
{
return __builtin_expect(!!((unsigned long)(void *)((unsigned long)ptr) >= (unsigned long)-4095), 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) IS_ERR_OR_NULL( const void *ptr)
{
return __builtin_expect(!!(!ptr), 0) || __builtin_expect(!!((unsigned long)(void *)((unsigned long)ptr) >= (unsigned long)-4095), 0);
}
# 51 "./include/linux/err.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void * __attribute__((__warn_unused_result__)) ERR_CAST( const void *ptr)
{
return (void *) ptr;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) PTR_ERR_OR_ZERO( const void *ptr)
{
if (IS_ERR(ptr))
return PTR_ERR(ptr);
else
return 0;
}
# 33 "./arch/x86/include/asm/processor.h" 2
# 59 "./arch/x86/include/asm/processor.h"
enum tlb_infos {
ENTRIES,
NR_INFO
};
extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lli_4k[NR_INFO];
extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lli_2m[NR_INFO];
extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lli_4m[NR_INFO];
extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lld_4k[NR_INFO];
extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lld_2m[NR_INFO];
extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lld_4m[NR_INFO];
extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lld_1g[NR_INFO];
struct cpuinfo_x86 {
__u8 x86;
__u8 x86_vendor;
__u8 x86_model;
__u8 x86_stepping;
int x86_tlbsize;
__u8 x86_virt_bits;
__u8 x86_phys_bits;
__u8 x86_coreid_bits;
__u8 cu_id;
__u32 extended_cpuid_level;
int cpuid_level;
__u32 x86_capability[19 + 1];
char x86_vendor_id[16];
char x86_model_id[64];
unsigned int x86_cache_size;
int x86_cache_alignment;
int x86_cache_max_rmid;
int x86_cache_occ_scale;
int x86_power;
unsigned long loops_per_jiffy;
u16 x86_max_cores;
u16 apicid;
u16 initial_apicid;
u16 x86_clflush_size;
u16 booted_cores;
u16 phys_proc_id;
u16 logical_proc_id;
u16 cpu_core_id;
u16 cpu_index;
u32 microcode;
u8 x86_cache_bits;
unsigned initialized : 1;
};
struct cpuid_regs {
u32 eax, ebx, ecx, edx;
};
enum cpuid_regs_idx {
CPUID_EAX = 0,
CPUID_EBX,
CPUID_ECX,
CPUID_EDX,
};
# 154 "./arch/x86/include/asm/processor.h"
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct x86_hw_tss doublefault_tss;
extern __u32 cpu_caps_cleared[19 + 1];
extern __u32 cpu_caps_set[19 + 1];
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(struct cpuinfo_x86) cpu_info;
extern const struct seq_operations cpuinfo_op;
extern void cpu_detect(struct cpuinfo_x86 *c);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long l1tf_pfn_limit(void)
{
return (1ULL << (boot_cpu_data.x86_cache_bits - 1 - 12));
}
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int have_cpuid_p(void)
{
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (*eax), "2" (*ecx)
: "memory");
}
# 220 "./arch/x86/include/asm/processor.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int native_cpuid_eax(unsigned int op) { unsigned int eax = op, ebx, ecx = 0, edx; native_cpuid(&eax, &ebx, &ecx, &edx); return eax; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int native_cpuid_ebx(unsigned int op) { unsigned int eax = op, ebx, ecx = 0, edx; native_cpuid(&eax, &ebx, &ecx, &edx); return ebx; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int native_cpuid_ecx(unsigned int op) { unsigned int eax = op, ebx, ecx = 0, edx; native_cpuid(&eax, &ebx, &ecx, &edx); return ecx; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int native_cpuid_edx(unsigned int op) { unsigned int eax = op, ebx, ecx = 0, edx; native_cpuid(&eax, &ebx, &ecx, &edx); return edx; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long read_cr3_pa(void)
{
return __read_cr3() & (0x7FFFFFFFFFFFF000ull);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_read_cr3_pa(void)
{
return __native_read_cr3() & (0x7FFFFFFFFFFFF000ull);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void load_cr3(pgd_t *pgdir)
{
write_cr3((__phys_addr_nodebug((unsigned long)(pgdir)) | 0ULL));
}
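/*
 * Usage sketch, not part of the original translation unit: the
 * 0x7FFFFFFFFFFFF000 constant is the CR3 address mask, so read_cr3_pa()
 * strips the PCID and flag bits and returns the physical address of the
 * current top-level page table:
 *
 *     unsigned long pgd_phys = read_cr3_pa();
 */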
# 297 "./arch/x86/include/asm/processor.h"
struct x86_hw_tss {
u32 reserved1;
u64 sp0;
u64 sp1;
u64 sp2;
u64 reserved2;
u64 ist[7];
u32 reserved3;
u32 reserved4;
u16 reserved5;
u16 io_bitmap_base;
} __attribute__((packed));
# 333 "./arch/x86/include/asm/processor.h"
struct entry_stack {
unsigned long words[64];
};
struct entry_stack_page {
struct entry_stack stack;
} __attribute__((__aligned__(((1UL) << 12))));
struct tss_struct {
struct x86_hw_tss x86_tss;
unsigned long io_bitmap[((65536/8)/sizeof(long)) + 1];
} __attribute__((__aligned__(((1UL) << 12))));
extern __attribute__((section(".data..percpu" "..page_aligned"))) __typeof__(struct tss_struct) cpu_tss_rw __attribute__((__aligned__(((1UL) << 12))));
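/*
 * tss_struct pairs the hardware TSS with the I/O permission bitmap (65536
 * bits plus one spare long) and is instantiated per CPU, page aligned, as
 * cpu_tss_rw in the read/write per-cpu section.
 */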
# 380 "./arch/x86/include/asm/processor.h"
struct orig_ist {
unsigned long ist[7];
};
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct orig_ist) orig_ist;
union irq_stack_union {
char irq_stack[(((1UL) << 12) << (2 + 0))];
struct {
char gs_base[40];
unsigned long stack_canary;
};
};
extern __attribute__((section(".data..percpu" "..first"))) __typeof__(union irq_stack_union) irq_stack_union ;
extern typeof(irq_stack_union) init_per_cpu__irq_stack_union;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long cpu_kernelmode_gs_base(int cpu)
{
return (unsigned long)(*({ do { const void *__vpp_verify = (typeof((&(irq_stack_union.gs_base)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(irq_stack_union.gs_base)))) *)((&(irq_stack_union.gs_base)))); (typeof((typeof(*((&(irq_stack_union.gs_base)))) *)((&(irq_stack_union.gs_base))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }));
}
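/*
 * The expression above is per_cpu(irq_stack_union.gs_base, cpu) after macro
 * expansion: take the address of the gs_base member and add that CPU's
 * offset from __per_cpu_offset[], yielding the kernel GS base for that CPU.
 */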
extern __attribute__((section(".data..percpu" ""))) __typeof__(char *) irq_stack_ptr;
extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned int) irq_count;
extern void ignore_sysret(void);
# 441 "./arch/x86/include/asm/processor.h"
extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;
struct perf_event;
typedef struct {
unsigned long seg;
} mm_segment_t;
struct thread_struct {
struct desc_struct tls_array[3];
unsigned long sp;
unsigned short es;
unsigned short ds;
unsigned short fsindex;
unsigned short gsindex;
unsigned long fsbase;
unsigned long gsbase;
# 479 "./arch/x86/include/asm/processor.h"
struct perf_event *ptrace_bps[4];
unsigned long debugreg6;
unsigned long ptrace_dr7;
unsigned long cr2;
unsigned long trap_nr;
unsigned long error_code;
unsigned long *io_bitmap_ptr;
unsigned long iopl;
unsigned io_bitmap_max;
mm_segment_t addr_limit;
unsigned int sig_on_uaccess_err:1;
unsigned int uaccess_err:1;
struct fpu fpu;
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{
*offset = __builtin_offsetof(struct thread_struct, fpu.state);
*size = fpu_kernel_xstate_size;
}
# 531 "./arch/x86/include/asm/processor.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_iopl_mask(unsigned mask)
{
# 545 "./arch/x86/include/asm/processor.h"
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
native_load_sp0(unsigned long sp0)
{
do { do { const void *__vpp_verify = (typeof((&(cpu_tss_rw.x86_tss.sp0)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_tss_rw.x86_tss.sp0)) { case 1: do { typedef typeof((cpu_tss_rw.x86_tss.sp0)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (sp0); (void)pto_tmp__; } switch (sizeof((cpu_tss_rw.x86_tss.sp0))) { case 1: asm("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "qi" ((pto_T__)(sp0))); break; case 2: asm("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 4: asm("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 8: asm("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "re" ((pto_T__)(sp0))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((cpu_tss_rw.x86_tss.sp0)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (sp0); (void)pto_tmp__; } switch (sizeof((cpu_tss_rw.x86_tss.sp0))) { case 1: asm("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "qi" ((pto_T__)(sp0))); break; case 2: asm("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 4: asm("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 8: asm("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "re" ((pto_T__)(sp0))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((cpu_tss_rw.x86_tss.sp0)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (sp0); (void)pto_tmp__; } switch (sizeof((cpu_tss_rw.x86_tss.sp0))) { case 1: asm("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "qi" ((pto_T__)(sp0))); break; case 2: asm("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 4: asm("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 8: asm("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "re" ((pto_T__)(sp0))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((cpu_tss_rw.x86_tss.sp0)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (sp0); (void)pto_tmp__; } switch (sizeof((cpu_tss_rw.x86_tss.sp0))) { case 1: asm("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "qi" ((pto_T__)(sp0))); break; case 2: asm("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 4: asm("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 8: asm("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "re" ((pto_T__)(sp0))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
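/*
 * The single long statement above is this_cpu_write(cpu_tss_rw.x86_tss.sp0,
 * sp0) fully expanded: the nested switches on sizeof() pick the correctly
 * sized %gs-relative mov for the per-cpu store.
 */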
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_swapgs(void)
{
asm volatile("swapgs" ::: "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long current_top_of_stack(void)
{
return ({ typeof(cpu_tss_rw.x86_tss.sp1) pfo_ret__; switch (sizeof(cpu_tss_rw.x86_tss.sp1)) { case 1: asm("mov" "b ""%%""gs"":" "%" "P1"",%0" : "=q" (pfo_ret__) : "p" (&(cpu_tss_rw.x86_tss.sp1))); break; case 2: asm("mov" "w ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(cpu_tss_rw.x86_tss.sp1))); break; case 4: asm("mov" "l ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(cpu_tss_rw.x86_tss.sp1))); break; case 8: asm("mov" "q ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(cpu_tss_rw.x86_tss.sp1))); break; default: __bad_percpu_size(); } pfo_ret__; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool on_thread_stack(void)
{
return (unsigned long)(current_top_of_stack() -
current_stack_pointer) < (((1UL) << 12) << (2 + 0));
}
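/*
 * current_top_of_stack() is a %gs-relative read of cpu_tss_rw.x86_tss.sp1;
 * on_thread_stack() then checks that the current stack pointer lies within
 * THREAD_SIZE of that top (PAGE_SIZE << 2 here, i.e. 16 KiB).
 */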
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void load_sp0(unsigned long sp0)
{
native_load_sp0(sp0);
}
extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
*eax = op;
*ecx = 0;
native_cpuid(eax, ebx, ecx, edx);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpuid_count(unsigned int op, int count,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
*eax = op;
*ecx = count;
native_cpuid(eax, ebx, ecx, edx);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpuid_eax(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return eax;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpuid_ebx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return ebx;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpuid_ecx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return ecx;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpuid_edx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return edx;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void rep_nop(void)
{
asm volatile("rep; nop" ::: "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void cpu_relax(void)
{
rep_nop();
}
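/* "rep; nop" encodes the PAUSE instruction; cpu_relax() is intended for busy-wait loops. */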
# 682 "./arch/x86/include/asm/processor.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sync_core(void)
{
# 715 "./arch/x86/include/asm/processor.h"
unsigned int tmp;
asm volatile (
"987: \n\t" ".pushsection .discard.unwind_hints\n\t" ".long 987b - .\n\t" ".short " "0" "\n\t" ".byte " "0" "\n\t" ".byte " "3" "\n\t" ".byte " "0" "\n\t" ".balign 4 \n\t" ".popsection\n\t"
"mov %%ss, %0\n\t"
"pushq %q0\n\t"
"pushq %%rsp\n\t"
"addq $8, (%%rsp)\n\t"
"pushfq\n\t"
"mov %%cs, %0\n\t"
"pushq %q0\n\t"
"pushq $1f\n\t"
"iretq\n\t"
"987: \n\t" ".pushsection .discard.unwind_hints\n\t" ".long 987b - .\n\t" ".short " "0" "\n\t" ".byte " "0" "\n\t" ".byte " "4" "\n\t" ".byte " "0" "\n\t" ".balign 4 \n\t" ".popsection\n\t"
"1:"
: "=&r" (tmp), "+r" (current_stack_pointer) : : "cc", "memory");
}
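/*
 * sync_core() serializes instruction execution by hand-building an
 * interrupt-return frame (SS, adjusted RSP, RFLAGS, CS, RIP of the "1:"
 * label) and executing IRETQ, which is architecturally serializing.  The
 * .discard.unwind_hints pushsections are objtool annotations, not code.
 */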
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);
extern unsigned long boot_option_idle_override;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
IDLE_POLL};
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
void early_trap_pf_init(void);
extern struct desc_ptr early_gdt_descr;
extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_debugctlmsr(void)
{
unsigned long debugctlmsr = 0;
((debugctlmsr) = native_read_msr((0x000001d9)));
return debugctlmsr;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_debugctlmsr(unsigned long debugctlmsr)
{
wrmsrl(0x000001d9, debugctlmsr);
}
extern void set_task_blockstep(struct task_struct *task, bool on);
extern int bootloader_type;
extern int bootloader_version;
extern char ignore_fpu_irq;
# 803 "./arch/x86/include/asm/processor.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void prefetch(const void *x)
{
asm volatile ("661:\n\t" "prefetcht0 %P1" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 0*32+25)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" "prefetchnta %P1" "\n" "665""1" ":\n\t" ".popsection\n" : : "i" (0), "m" (*(const char *)x));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void prefetchw(const void *x)
{
asm volatile ("661:\n\t" "prefetcht0 %P1" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 6*32+ 8)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" "prefetchw %P1" "\n" "665""1" ":\n\t" ".popsection\n" : : "i" (0), "m" (*(const char *)x));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void spin_lock_prefetch(const void *x)
{
prefetchw(x);
}
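/*
 * prefetch()/prefetchw() use the alternatives mechanism: the default
 * prefetcht0 is patched at boot to prefetchnta or prefetchw when the CPU
 * feature bits encoded above as (0*32+25) and (6*32+8) are present.
 */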
# 903 "./arch/x86/include/asm/processor.h"
extern unsigned long KSTK_ESP(struct task_struct *task);
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
unsigned long new_sp);
# 923 "./arch/x86/include/asm/processor.h"
extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
extern __attribute__((section(".data..percpu" ""))) __typeof__(u64) msr_misc_features_shadow;
# 936 "./arch/x86/include/asm/processor.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mpx_enable_management(void)
{
return -22;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mpx_disable_management(void)
{
return -22;
}
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
uint32_t base, eax, signature[3];
for (base = 0x40000000; base < 0x40010000; base += 0x100) {
cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
if (!memcmp(sig, signature, 12) &&
(leaves == 0 || ((eax - base) >= leaves)))
return base;
}
return 0;
}
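/*
 * hypervisor_cpuid_base() scans the hypervisor CPUID range
 * 0x40000000-0x4000ffff in steps of 0x100, matching the 12-byte vendor
 * signature returned in EBX/ECX/EDX and, if "leaves" is non-zero, requiring
 * at least that many leaves beyond the base.
 */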
extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(void *begin, void *end);
void default_idle(void);
void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
void microcode_check(void);
enum l1tf_mitigations {
L1TF_MITIGATION_OFF,
L1TF_MITIGATION_FLUSH_NOWARN,
L1TF_MITIGATION_FLUSH,
L1TF_MITIGATION_FLUSH_NOSMT,
L1TF_MITIGATION_FULL,
L1TF_MITIGATION_FULL_FORCE
};
extern enum l1tf_mitigations l1tf_mitigation;
# 6 "./arch/x86/include/asm/cpufeature.h" 2
enum cpuid_leafs
{
CPUID_1_EDX = 0,
CPUID_8000_0001_EDX,
CPUID_8086_0001_EDX,
CPUID_LNX_1,
CPUID_1_ECX,
CPUID_C000_0001_EDX,
CPUID_8000_0001_ECX,
CPUID_LNX_2,
CPUID_LNX_3,
CPUID_7_0_EBX,
CPUID_D_1_EAX,
CPUID_F_0_EDX,
CPUID_F_1_EDX,
CPUID_8000_0008_EBX,
CPUID_6_EAX,
CPUID_8000_000A_EDX,
CPUID_7_ECX,
CPUID_8000_0007_EBX,
CPUID_7_EDX,
};
extern const char * const x86_cap_flags[19*32];
extern const char * const x86_power_flags[32];
# 49 "./arch/x86/include/asm/cpufeature.h"
extern const char * const x86_bug_flags[1*32];
# 133 "./arch/x86/include/asm/cpufeature.h"
extern void setup_clear_cpu_cap(unsigned int bit);
extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
# 54 "./arch/x86/include/asm/thread_info.h" 2
struct thread_info {
unsigned long flags;
u32 status;
};
# 178 "./arch/x86/include/asm/thread_info.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_within_stack_frames(const void * const stack,
const void * const stackend,
const void *obj, unsigned long len)
{
# 210 "./arch/x86/include/asm/thread_info.h"
return NOT_STACK;
}
# 243 "./arch/x86/include/asm/thread_info.h"
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void arch_setup_new_exec(void);
# 39 "./include/linux/thread_info.h" 2
# 53 "./include/linux/thread_info.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_ti_thread_flag(struct thread_info *ti, int flag)
{
set_bit(flag, (unsigned long *)&ti->flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
clear_bit(flag, (unsigned long *)&ti->flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_ti_thread_flag(struct thread_info *ti, int flag,
bool value)
{
if (value)
set_ti_thread_flag(ti, flag);
else
clear_ti_thread_flag(ti, flag);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_ti_thread_flag(struct thread_info *ti, int flag)
{
return (__builtin_constant_p((flag)) ? constant_test_bit((flag), ((unsigned long *)&ti->flags)) : variable_test_bit((flag), ((unsigned long *)&ti->flags)));
}
# 122 "./include/linux/thread_info.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_object_size(const void *ptr, unsigned long n,
bool to_user)
{ }
extern void
__bad_copy_from(void);
extern void
__bad_copy_to(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void copy_overflow(int size, unsigned long count)
{
({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __warn_printk("Buffer overflow detected (%d < %lu)!\n", size, count); do { do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("./include/linux/thread_info.h"), "i" (134), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (0)); }); } while (0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
int sz = -1;
if (__builtin_expect(!!(sz >= 0 && sz < bytes), 0)) {
if (!__builtin_constant_p(bytes))
copy_overflow(sz, bytes);
else if (is_source)
__bad_copy_from();
else
__bad_copy_to();
return false;
}
check_object_size(addr, bytes, is_source);
return true;
}
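/*
 * Here sz is hard-coded to -1 (object size unknown) and check_object_size()
 * above is an empty stub, so the whole overflow check compiles away.  This
 * looks like a build without hardened usercopy and without a compile-time
 * object-size builtin; that reading is an assumption from the stubs above.
 */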
# 8 "./arch/x86/include/asm/preempt.h" 2
extern __attribute__((section(".data..percpu" ""))) __typeof__(int) __preempt_count;
# 24 "./arch/x86/include/asm/preempt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int preempt_count(void)
{
return ({ typeof(__preempt_count) pfo_ret__; switch (sizeof(__preempt_count)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (__preempt_count)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; default: __bad_percpu_size(); } pfo_ret__; }) & ~0x80000000;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void preempt_count_set(int pc)
{
int old, new;
do {
old = ({ typeof(__preempt_count) pfo_ret__; switch (sizeof(__preempt_count)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (__preempt_count)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; default: __bad_percpu_size(); } pfo_ret__; });
new = (old & 0x80000000) |
(pc & ~0x80000000);
} while (({ typeof(__preempt_count) pco_ret__; typeof(__preempt_count) pco_old__ = (old); typeof(__preempt_count) pco_new__ = (new); switch (sizeof(__preempt_count)) { case 1: asm("cmpxchgb %2, ""%%""gs"":" "%" "1" : "=a" (pco_ret__), "+m" (__preempt_count) : "q" (pco_new__), "0" (pco_old__) : "memory"); break; case 2: asm("cmpxchgw %2, ""%%""gs"":" "%" "1" : "=a" (pco_ret__), "+m" (__preempt_count) : "r" (pco_new__), "0" (pco_old__) : "memory"); break; case 4: asm("cmpxchgl %2, ""%%""gs"":" "%" "1" : "=a" (pco_ret__), "+m" (__preempt_count) : "r" (pco_new__), "0" (pco_old__) : "memory"); break; case 8: asm("cmpxchgq %2, ""%%""gs"":" "%" "1" : "=a" (pco_ret__), "+m" (__preempt_count) : "r" (pco_new__), "0" (pco_old__) : "memory"); break; default: __bad_percpu_size(); } pco_ret__; }) != old);
}
# 58 "./arch/x86/include/asm/preempt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void set_preempt_need_resched(void)
{
do { typedef typeof((__preempt_count)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (~0x80000000); (void)pto_tmp__; } switch (sizeof((__preempt_count))) { case 1: asm("and" "b %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "qi" ((pto_T__)(~0x80000000))); break; case 2: asm("and" "w %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pto_T__)(~0x80000000))); break; case 4: asm("and" "l %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pto_T__)(~0x80000000))); break; case 8: asm("and" "q %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "re" ((pto_T__)(~0x80000000))); break; default: __bad_percpu_size(); } } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void clear_preempt_need_resched(void)
{
do { typedef typeof((__preempt_count)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (0x80000000); (void)pto_tmp__; } switch (sizeof((__preempt_count))) { case 1: asm("or" "b %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "qi" ((pto_T__)(0x80000000))); break; case 2: asm("or" "w %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pto_T__)(0x80000000))); break; case 4: asm("or" "l %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pto_T__)(0x80000000))); break; case 8: asm("or" "q %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "re" ((pto_T__)(0x80000000))); break; default: __bad_percpu_size(); } } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool test_preempt_need_resched(void)
{
return !(({ typeof(__preempt_count) pfo_ret__; switch (sizeof(__preempt_count)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (__preempt_count)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; default: __bad_percpu_size(); } pfo_ret__; }) & 0x80000000);
}
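/*
 * x86 keeps PREEMPT_NEED_RESCHED (0x80000000) inverted inside
 * __preempt_count: the bit is cleared when a reschedule is needed, so one
 * decrement-and-test against zero covers both the nesting count and the
 * pending-reschedule check.
 */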
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __preempt_count_add(int val)
{
do { typedef typeof((__preempt_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(val) && ((val) == 1 || (val) == -1)) ? (int)(val) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (val); (void)pao_tmp__; } switch (sizeof((__preempt_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "qi" ((pao_T__)(val))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pao_T__)(val))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pao_T__)(val))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "re" ((pao_T__)(val))); break; default: __bad_percpu_size(); } } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __preempt_count_sub(int val)
{
do { typedef typeof((__preempt_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-val) && ((-val) == 1 || (-val) == -1)) ? (int)(-val) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-val); (void)pao_tmp__; } switch (sizeof((__preempt_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "qi" ((pao_T__)(-val))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pao_T__)(-val))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pao_T__)(-val))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "re" ((pao_T__)(-val))); break; default: __bad_percpu_size(); } } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool __preempt_count_dec_and_test(void)
{
return ({ bool c = false; asm goto("decl" " " "%%""gs"":" "%" "[var]" "; j" "e" " %l[cc_label]" : : [var] "m" (__preempt_count) : "memory" : cc_label); if (0) { cc_label: c = true; } c; });
}
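/*
 * asm-goto form of "decl %gs:__preempt_count; je ...": returns true only
 * when the decrement reaches zero, i.e. the outermost preempt_enable() with
 * a reschedule pending (given the inverted bit described above).
 */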
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool should_resched(int preempt_offset)
{
return __builtin_expect(!!(({ typeof(__preempt_count) pfo_ret__; switch (sizeof(__preempt_count)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (__preempt_count)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; default: __bad_percpu_size(); } pfo_ret__; }) == preempt_offset), 0);
}
# 79 "./include/linux/preempt.h" 2
# 41 "./include/linux/rcupdate.h" 2
# 1 "./include/linux/bottom_half.h" 1
# 10 "./include/linux/bottom_half.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
__preempt_count_add(cnt);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_bh_disable(void)
{
__local_bh_disable_ip(({ __label__ __here; __here: (unsigned long)&&__here; }), (2 * (1UL << (0 + 8))));
}
extern void _local_bh_enable(void);
extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_bh_enable_ip(unsigned long ip)
{
__local_bh_enable_ip(ip, (2 * (1UL << (0 + 8))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_bh_enable(void)
{
__local_bh_enable_ip(({ __label__ __here; __here: (unsigned long)&&__here; }), (2 * (1UL << (0 + 8))));
}
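/*
 * The ({ __label__ __here; ... }) expression is _THIS_IP_, the caller's
 * code address used for tracing, and 2 * (1UL << 8) is
 * SOFTIRQ_DISABLE_OFFSET (0x200), the amount added to the preempt count
 * while bottom halves are disabled.
 */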
# 42 "./include/linux/rcupdate.h" 2
# 1 "./include/linux/lockdep.h" 1
# 13 "./include/linux/lockdep.h"
struct task_struct;
struct lockdep_map;
extern int prove_locking;
extern int lock_stat;
# 389 "./include/linux/lockdep.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_off(void)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_on(void)
{
}
# 426 "./include/linux/lockdep.h"
struct lock_class_key { };
struct lockdep_map { };
# 444 "./include/linux/lockdep.h"
struct pin_cookie { };
# 454 "./include/linux/lockdep.h"
enum xhlock_context_t {
XHLOCK_HARD,
XHLOCK_SOFT,
XHLOCK_CTX_NR,
};
# 468 "./include/linux/lockdep.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_invariant_state(bool force) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_init_task(struct task_struct *task) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_free_task(struct task_struct *task) {}
# 531 "./include/linux/lockdep.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void print_irqtrace_events(struct task_struct *curr)
{
}
# 614 "./include/linux/lockdep.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
# 43 "./include/linux/rcupdate.h" 2
void call_rcu(struct callback_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void synchronize_rcu(void);
# 70 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __rcu_read_lock(void)
{
if (0)
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __rcu_read_unlock(void)
{
if (0)
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rcu_preempt_depth(void)
{
return 0;
}
void rcu_init(void);
extern int rcu_scheduler_active __attribute__((__section__(".data..read_mostly")));
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
# 108 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_user_enter(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_user_exit(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_init_nohz(void) { }
# 163 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void exit_tasks_rcu_start(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void exit_tasks_rcu_finish(void) { }
# 186 "./include/linux/rcupdate.h"
# 1 "./include/linux/rcutree.h" 1
# 33 "./include/linux/rcutree.h"
void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_virt_note_context_switch(int cpu)
{
rcu_note_context_switch(false);
}
void synchronize_rcu_expedited(void);
void kfree_call_rcu(struct callback_head *head, rcu_callback_t func);
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
void exit_rcu(void);
void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __attribute__((__section__(".data..read_mostly")));
void rcu_end_inkernel_boot(void);
bool rcu_is_watching(void);
void rcu_all_qs(void);
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
# 187 "./include/linux/rcupdate.h" 2
# 207 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_rcu_head(struct callback_head *head) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void destroy_rcu_head(struct callback_head *head) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_rcu_head_on_stack(struct callback_head *head) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void destroy_rcu_head_on_stack(struct callback_head *head) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rcu_lockdep_current_cpu_online(void) { return true; }
# 245 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rcu_read_lock_held(void)
{
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rcu_read_lock_bh_held(void)
{
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rcu_read_lock_sched_held(void)
{
return !0;
}
# 603 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_read_lock(void)
{
__rcu_read_lock();
(void)0;
do { } while (0);
do { } while (0);
}
# 655 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_read_unlock(void)
{
do { } while (0);
(void)0;
__rcu_read_unlock();
do { } while (0);
}
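/*
 * In this configuration __rcu_read_lock()/__rcu_read_unlock() compile to
 * nothing (note the "if (0)" guards above), so rcu_read_lock() relies on
 * the kernel being non-preemptible: a reader cannot be scheduled out, and
 * grace periods only need to wait for every CPU to pass through the
 * scheduler.  That this is a non-preemptible, !PREEMPT_RCU build is an
 * assumption drawn from the folded constants.
 */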
# 676 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_read_lock_bh(void)
{
local_bh_disable();
(void)0;
do { } while (0);
do { } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_read_unlock_bh(void)
{
do { } while (0);
do { } while (0);
(void)0;
local_bh_enable();
}
# 711 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_read_lock_sched(void)
{
__asm__ __volatile__("" : : : "memory");
(void)0;
do { } while (0);
do { } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void rcu_read_lock_sched_notrace(void)
{
__asm__ __volatile__("" : : : "memory");
(void)0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_read_unlock_sched(void)
{
do { } while (0);
do { } while (0);
(void)0;
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void rcu_read_unlock_sched_notrace(void)
{
(void)0;
__asm__ __volatile__("" : : : "memory");
}
# 872 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_head_init(struct callback_head *rhp)
{
rhp->func = (rcu_callback_t)~0L;
}
# 890 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool
rcu_head_after_call_rcu(struct callback_head *rhp, rcu_callback_t f)
{
if (({ union { typeof(rhp->func) __val; char __c[1]; } __u; if (1) __read_once_size(&(rhp->func), __u.__c, sizeof(rhp->func)); else __read_once_size_nocheck(&(rhp->func), __u.__c, sizeof(rhp->func)); do { } while (0); __u.__val; }) == f)
return true;
({ int __ret_warn_on = !!(({ union { typeof(rhp->func) __val; char __c[1]; } __u; if (1) __read_once_size(&(rhp->func), __u.__c, sizeof(rhp->func)); else __read_once_size_nocheck(&(rhp->func), __u.__c, sizeof(rhp->func)); do { } while (0); __u.__val; }) != (rcu_callback_t)~0L); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("./include/linux/rcupdate.h"), "i" (895), "i" ((1 << 0)|((1 << 1)|((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void synchronize_rcu_bh(void)
{
synchronize_rcu();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void synchronize_rcu_bh_expedited(void)
{
synchronize_rcu_expedited();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void call_rcu_bh(struct callback_head *head, rcu_callback_t func)
{
call_rcu(head, func);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_barrier_bh(void)
{
rcu_barrier();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void synchronize_sched(void)
{
synchronize_rcu();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void synchronize_sched_expedited(void)
{
synchronize_rcu_expedited();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void call_rcu_sched(struct callback_head *head, rcu_callback_t func)
{
call_rcu(head, func);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcu_barrier_sched(void)
{
rcu_barrier();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_state_synchronize_sched(void)
{
return get_state_synchronize_rcu();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cond_synchronize_sched(unsigned long oldstate)
{
cond_synchronize_rcu(oldstate);
}
# 12 "./include/linux/rculist.h" 2
# 31 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void INIT_LIST_HEAD_RCU(struct list_head *list)
{
({ union { typeof(list->next) __val; char __c[1]; } __u = { .__val = ( typeof(list->next)) (list) }; __write_once_size(&(list->next), __u.__c, sizeof(list->next)); __u.__val; });
({ union { typeof(list->prev) __val; char __c[1]; } __u = { .__val = ( typeof(list->prev)) (list) }; __write_once_size(&(list->prev), __u.__c, sizeof(list->prev)); __u.__val; });
}
# 49 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
if (!__list_add_valid(new, prev, next))
return;
new->next = next;
new->prev = prev;
({ uintptr_t _r_a_p__v = (uintptr_t)(new); if (__builtin_constant_p(new) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof(((*((struct list_head **)(&(prev)->next))))) __val; char __c[1]; } __u = { .__val = ( typeof(((*((struct list_head **)(&(prev)->next)))))) ((typeof((*((struct list_head **)(&(prev)->next)))))(_r_a_p__v)) }; __write_once_size(&(((*((struct list_head **)(&(prev)->next))))), __u.__c, sizeof(((*((struct list_head **)(&(prev)->next)))))); __u.__val; }); else do { do { extern void __compiletime_assert_57(void) ; if (!((sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(long)))) __compiletime_assert_57(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(*((struct list_head **)(&(prev)->next)))) __val; char __c[1]; } __u = { .__val = ( typeof(*&(*((struct list_head **)(&(prev)->next))))) ((typeof(*((typeof((*((struct list_head **)(&(prev)->next)))))_r_a_p__v)) *)((typeof((*((struct list_head **)(&(prev)->next)))))_r_a_p__v)) }; __write_once_size(&(*&(*((struct list_head **)(&(prev)->next)))), __u.__c, sizeof(*&(*((struct list_head **)(&(prev)->next))))); __u.__val; }); } while (0); _r_a_p__v; });
next->prev = new;
}
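/*
 * The large expression above is rcu_assign_pointer(list_next_rcu(prev), new)
 * after expansion: a compiler barrier followed by WRITE_ONCE(), the x86 form
 * of the release store that publishes the fully initialized entry to
 * concurrent RCU readers.
 */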
# 77 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}
# 98 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_add_tail_rcu(struct list_head *new,
struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}
# 128 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_del_rcu(struct list_head *entry)
{
__list_del_entry(entry);
entry->prev = ((void *) 0x200 + (0xdead000000000000UL));
}
# 154 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
n->pprev = ((void *)0);
}
}
# 170 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_replace_rcu(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->prev = old->prev;
({ uintptr_t _r_a_p__v = (uintptr_t)(new); if (__builtin_constant_p(new) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof(((*((struct list_head **)(&(new->prev)->next))))) __val; char __c[1]; } __u = { .__val = ( typeof(((*((struct list_head **)(&(new->prev)->next)))))) ((typeof((*((struct list_head **)(&(new->prev)->next)))))(_r_a_p__v)) }; __write_once_size(&(((*((struct list_head **)(&(new->prev)->next))))), __u.__c, sizeof(((*((struct list_head **)(&(new->prev)->next)))))); __u.__val; }); else do { do { extern void __compiletime_assert_175(void) ; if (!((sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(long)))) __compiletime_assert_175(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(*((struct list_head **)(&(new->prev)->next)))) __val; char __c[1]; } __u = { .__val = ( typeof(*&(*((struct list_head **)(&(new->prev)->next))))) ((typeof(*((typeof((*((struct list_head **)(&(new->prev)->next)))))_r_a_p__v)) *)((typeof((*((struct list_head **)(&(new->prev)->next)))))_r_a_p__v)) }; __write_once_size(&(*&(*((struct list_head **)(&(new->prev)->next)))), __u.__c, sizeof(*&(*((struct list_head **)(&(new->prev)->next))))); __u.__val; }); } while (0); _r_a_p__v; });
new->next->prev = new;
old->prev = ((void *) 0x200 + (0xdead000000000000UL));
}
# 199 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_splice_init_rcu(struct list_head *list,
struct list_head *prev,
struct list_head *next,
void (*sync)(void))
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
INIT_LIST_HEAD_RCU(list);
# 222 "./include/linux/rculist.h"
sync();
# 232 "./include/linux/rculist.h"
last->next = next;
({ uintptr_t _r_a_p__v = (uintptr_t)(first); if (__builtin_constant_p(first) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof(((*((struct list_head **)(&(prev)->next))))) __val; char __c[1]; } __u = { .__val = ( typeof(((*((struct list_head **)(&(prev)->next)))))) ((typeof((*((struct list_head **)(&(prev)->next)))))(_r_a_p__v)) }; __write_once_size(&(((*((struct list_head **)(&(prev)->next))))), __u.__c, sizeof(((*((struct list_head **)(&(prev)->next)))))); __u.__val; }); else do { do { extern void __compiletime_assert_233(void) ; if (!((sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(long)))) __compiletime_assert_233(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(*((struct list_head **)(&(prev)->next)))) __val; char __c[1]; } __u = { .__val = ( typeof(*&(*((struct list_head **)(&(prev)->next))))) ((typeof(*((typeof((*((struct list_head **)(&(prev)->next)))))_r_a_p__v)) *)((typeof((*((struct list_head **)(&(prev)->next)))))_r_a_p__v)) }; __write_once_size(&(*&(*((struct list_head **)(&(prev)->next)))), __u.__c, sizeof(*&(*((struct list_head **)(&(prev)->next))))); __u.__val; }); } while (0); _r_a_p__v; });
first->prev = prev;
next->prev = last;
}
# 245 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
if (!list_empty(list))
__list_splice_init_rcu(list, head, head->next, sync);
}
# 260 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice_tail_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
if (!list_empty(list))
__list_splice_init_rcu(list, head->prev, head, sync);
}
# 453 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
n->pprev = ((void *) 0x200 + (0xdead000000000000UL));
}
# 466 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
{
struct hlist_node *next = old->next;
new->next = next;
new->pprev = old->pprev;
({ uintptr_t _r_a_p__v = (uintptr_t)(new); if (__builtin_constant_p(new) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof((*(struct hlist_node **)new->pprev)) __val; char __c[1]; } __u = { .__val = ( typeof((*(struct hlist_node **)new->pprev))) ((typeof(*(struct hlist_node **)new->pprev))(_r_a_p__v)) }; __write_once_size(&((*(struct hlist_node **)new->pprev)), __u.__c, sizeof((*(struct hlist_node **)new->pprev))); __u.__val; }); else do { do { extern void __compiletime_assert_473(void) ; if (!((sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(char) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(short) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(int) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(long)))) __compiletime_assert_473(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&*(struct hlist_node **)new->pprev) __val; char __c[1]; } __u = { .__val = ( typeof(*&*(struct hlist_node **)new->pprev)) ((typeof(*((typeof(*(struct hlist_node **)new->pprev))_r_a_p__v)) *)((typeof(*(struct hlist_node **)new->pprev))_r_a_p__v)) }; __write_once_size(&(*&*(struct hlist_node **)new->pprev), __u.__c, sizeof(*&*(struct hlist_node **)new->pprev)); __u.__val; }); } while (0); _r_a_p__v; });
if (next)
new->next->pprev = &new->next;
old->pprev = ((void *) 0x200 + (0xdead000000000000UL));
}
# 505 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
n->pprev = &h->first;
({ uintptr_t _r_a_p__v = (uintptr_t)(n); if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof(((*((struct hlist_node **)(&(h)->first))))) __val; char __c[1]; } __u = { .__val = ( typeof(((*((struct hlist_node **)(&(h)->first)))))) ((typeof((*((struct hlist_node **)(&(h)->first)))))(_r_a_p__v)) }; __write_once_size(&(((*((struct hlist_node **)(&(h)->first))))), __u.__c, sizeof(((*((struct hlist_node **)(&(h)->first)))))); __u.__val; }); else do { do { extern void __compiletime_assert_512(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(long)))) __compiletime_assert_512(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(*((struct hlist_node **)(&(h)->first)))) __val; char __c[1]; } __u = { .__val = ( typeof(*&(*((struct hlist_node **)(&(h)->first))))) ((typeof(*((typeof((*((struct hlist_node **)(&(h)->first)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(h)->first)))))_r_a_p__v)) }; __write_once_size(&(*&(*((struct hlist_node **)(&(h)->first)))), __u.__c, sizeof(*&(*((struct hlist_node **)(&(h)->first))))); __u.__val; }); } while (0); _r_a_p__v; });
if (first)
first->pprev = &n->next;
}
# 536 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_tail_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *i, *last = ((void *)0);
for (i = h->first; i; i = i->next)
last = i;
if (last) {
n->next = last->next;
n->pprev = &last->next;
({ uintptr_t _r_a_p__v = (uintptr_t)(n); if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof(((*((struct hlist_node **)(&(last)->next))))) __val; char __c[1]; } __u = { .__val = ( typeof(((*((struct hlist_node **)(&(last)->next)))))) ((typeof((*((struct hlist_node **)(&(last)->next)))))(_r_a_p__v)) }; __write_once_size(&(((*((struct hlist_node **)(&(last)->next))))), __u.__c, sizeof(((*((struct hlist_node **)(&(last)->next)))))); __u.__val; }); else do { do { extern void __compiletime_assert_548(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)))) __compiletime_assert_548(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(*((struct hlist_node **)(&(last)->next)))) __val; char __c[1]; } __u = { .__val = ( typeof(*&(*((struct hlist_node **)(&(last)->next))))) ((typeof(*((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)) }; __write_once_size(&(*&(*((struct hlist_node **)(&(last)->next)))), __u.__c, sizeof(*&(*((struct hlist_node **)(&(last)->next))))); __u.__val; }); } while (0); _r_a_p__v; });
} else {
hlist_add_head_rcu(n, h);
}
}
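/*
 * An hlist has no tail pointer, so the RCU tail insert walks to the last
 * node and publishes the new entry with rcu_assign_pointer() on last->next,
 * falling back to a head insert when the list is empty.
 */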
# 572 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
({ uintptr_t _r_a_p__v = (uintptr_t)(n); if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof(((*((struct hlist_node **)((n)->pprev))))) __val; char __c[1]; } __u = { .__val = ( typeof(((*((struct hlist_node **)((n)->pprev)))))) ((typeof((*((struct hlist_node **)((n)->pprev)))))(_r_a_p__v)) }; __write_once_size(&(((*((struct hlist_node **)((n)->pprev))))), __u.__c, sizeof(((*((struct hlist_node **)((n)->pprev)))))); __u.__val; }); else do { do { extern void __compiletime_assert_577(void) ; if (!((sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(long)))) __compiletime_assert_577(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(*((struct hlist_node **)((n)->pprev)))) __val; char __c[1]; } __u = { .__val = ( typeof(*&(*((struct hlist_node **)((n)->pprev))))) ((typeof(*((typeof((*((struct hlist_node **)((n)->pprev)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)((n)->pprev)))))_r_a_p__v)) }; __write_once_size(&(*&(*((struct hlist_node **)((n)->pprev)))), __u.__c, sizeof(*&(*((struct hlist_node **)((n)->pprev))))); __u.__val; }); } while (0); _r_a_p__v; });
next->pprev = &n->next;
}
# 599 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
n->pprev = &prev->next;
({ uintptr_t _r_a_p__v = (uintptr_t)(n); if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof(((*((struct hlist_node **)(&(prev)->next))))) __val; char __c[1]; } __u = { .__val = ( typeof(((*((struct hlist_node **)(&(prev)->next)))))) ((typeof((*((struct hlist_node **)(&(prev)->next)))))(_r_a_p__v)) }; __write_once_size(&(((*((struct hlist_node **)(&(prev)->next))))), __u.__c, sizeof(((*((struct hlist_node **)(&(prev)->next)))))); __u.__val; }); else do { do { extern void __compiletime_assert_604(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(long)))) __compiletime_assert_604(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(*((struct hlist_node **)(&(prev)->next)))) __val; char __c[1]; } __u = { .__val = ( typeof(*&(*((struct hlist_node **)(&(prev)->next))))) ((typeof(*((typeof((*((struct hlist_node **)(&(prev)->next)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(prev)->next)))))_r_a_p__v)) }; __write_once_size(&(*&(*((struct hlist_node **)(&(prev)->next)))), __u.__c, sizeof(*&(*((struct hlist_node **)(&(prev)->next))))); __u.__val; }); } while (0); _r_a_p__v; });
if (n->next)
n->next->pprev = &n->next;
}
# 6 "./include/linux/pid.h" 2
enum pid_type
{
PIDTYPE_PID,
PIDTYPE_TGID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX,
};
# 52 "./include/linux/pid.h"
struct upid {
int nr;
struct pid_namespace *ns;
};
struct pid
{
atomic_t count;
unsigned int level;
struct hlist_head tasks[PIDTYPE_MAX];
struct callback_head rcu;
struct upid numbers[1];
};
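/*
 * struct pid: count is the reference count, level the namespace nesting
 * depth, and numbers[] a trailing array with one struct upid per level; the
 * declared size of 1 covers only the initial namespace, deeper levels are
 * allocated past the end of the structure.
 */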
extern struct pid init_struct_pid;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pid *get_pid(struct pid *pid)
{
if (pid)
atomic_inc(&pid->count);
return pid;
}
extern void put_pid(struct pid *pid);
extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
extern void attach_pid(struct task_struct *task, enum pid_type);
extern void detach_pid(struct task_struct *task, enum pid_type);
extern void change_pid(struct task_struct *task, enum pid_type,
struct pid *pid);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
struct pid_namespace;
extern struct pid_namespace init_pid_ns;
# 104 "./include/linux/pid.h"
extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
extern struct pid *find_vpid(int nr);
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
extern struct pid *alloc_pid(struct pid_namespace *ns);
extern void free_pid(struct pid *pid);
extern void disable_pid_allocation(struct pid_namespace *ns);
# 128 "./include/linux/pid.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pid_namespace *ns_of_pid(struct pid *pid)
{
struct pid_namespace *ns = ((void *)0);
if (pid)
ns = pid->numbers[pid->level].ns;
return ns;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_child_reaper(struct pid *pid)
{
return pid->numbers[pid->level].nr == 1;
}
# 158 "./include/linux/pid.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t pid_nr(struct pid *pid)
{
pid_t nr = 0;
if (pid)
nr = pid->numbers[0].nr;
return nr;
}
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
pid_t pid_vnr(struct pid *pid);
# 15 "./include/linux/sched.h" 2
# 1 "./include/linux/sem.h" 1
# 1 "./include/uapi/linux/sem.h" 1
# 1 "./include/linux/ipc.h" 1
# 1 "./include/linux/spinlock.h" 1
# 82 "./include/linux/spinlock.h"
# 1 "./include/linux/spinlock_types.h" 1
# 13 "./include/linux/spinlock_types.h"
# 1 "./arch/x86/include/asm/spinlock_types.h" 1
# 16 "./arch/x86/include/asm/spinlock_types.h"
typedef u8 __ticket_t;
typedef u16 __ticketpair_t;
# 27 "./arch/x86/include/asm/spinlock_types.h"
# 1 "./include/asm-generic/qspinlock_types.h" 1
# 31 "./include/asm-generic/qspinlock_types.h"
typedef struct qspinlock {
union {
atomic_t val;
struct {
u8 locked;
u8 pending;
};
struct {
u16 locked_pending;
u16 tail;
};
# 60 "./include/asm-generic/qspinlock_types.h"
};
} arch_spinlock_t;
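/*
 * qspinlock layout in this (little-endian, apparently NR_CPUS < 16K)
 * configuration: byte 0 is the locked byte, byte 1 the pending bit, and the
 * upper 16 bits hold the MCS tail; val overlays the whole 32-bit word for
 * atomic access.
 */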
# 28 "./arch/x86/include/asm/spinlock_types.h" 2
# 1 "./include/asm-generic/qrwlock_types.h" 1
# 1 "./arch/x86/include/asm/spinlock_types.h" 1
# 8 "./include/asm-generic/qrwlock_types.h" 2
typedef struct qrwlock {
union {
atomic_t cnts;
struct {
u8 wlocked;
u8 __lstate[3];
};
};
arch_spinlock_t wait_lock;
} arch_rwlock_t;
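/*
 * qrwlock: cnts packs the writer-locked byte (wlocked) in the low byte and
 * the reader count above it; the accessors further down treat the low 9
 * bits (0x1ff) as writer/waiter state and add 1 << 9 per reader.
 */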
# 30 "./arch/x86/include/asm/spinlock_types.h" 2
# 14 "./include/linux/spinlock_types.h" 2
typedef struct raw_spinlock {
arch_spinlock_t raw_lock;
} raw_spinlock_t;
# 61 "./include/linux/spinlock_types.h"
typedef struct spinlock {
union {
struct raw_spinlock rlock;
# 72 "./include/linux/spinlock_types.h"
};
} spinlock_t;
# 83 "./include/linux/spinlock_types.h"
# 1 "./include/linux/rwlock_types.h" 1
# 11 "./include/linux/rwlock_types.h"
typedef struct {
arch_rwlock_t raw_lock;
} rwlock_t;
# 84 "./include/linux/spinlock_types.h" 2
# 83 "./include/linux/spinlock.h" 2
# 1 "./arch/x86/include/asm/spinlock.h" 1
# 10 "./arch/x86/include/asm/spinlock.h"
# 1 "./arch/x86/include/asm/paravirt.h" 1
# 942 "./arch/x86/include/asm/paravirt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
# 11 "./arch/x86/include/asm/spinlock.h" 2
# 27 "./arch/x86/include/asm/spinlock.h"
# 1 "./arch/x86/include/asm/qspinlock.h" 1
# 14 "./arch/x86/include/asm/qspinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
u32 val;
val = ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "btsl" " %[val], " "%[var]" "; j" "c" " %l[cc_label]" : : [var] "m" (lock->val.counter), [val] "I" ((0 + 8)) : "memory" : cc_label); if (0) { cc_label: c = true; } c; }) * (1U << (0 + 8));
val |= atomic_read(&lock->val) & ~(((1U << 8) - 1) << (0 + 8));
return val;
}
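/*
 * The asm goto above looks like the expansion of an x86 "lock btsl"
 * test-and-set of the pending bit (bit 8): the carry flag reports whether
 * the bit was already set, and the rest of lock->val is OR-ed back in from
 * a plain atomic_read().
 */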
# 90 "./arch/x86/include/asm/qspinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_pv_lock_init(void)
{
}
# 1 "./include/asm-generic/qspinlock.h" 1
# 29 "./include/asm-generic/qspinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int queued_spin_is_locked(struct qspinlock *lock)
{
return atomic_read(&lock->val);
}
# 48 "./include/asm-generic/qspinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int queued_spin_value_unlocked(struct qspinlock lock)
{
return !atomic_read(&lock.val);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int queued_spin_is_contended(struct qspinlock *lock)
{
return atomic_read(&lock->val) & ~(((1U << 8) - 1) << 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int queued_spin_trylock(struct qspinlock *lock)
{
u32 val = atomic_read(&lock->val);
if (__builtin_expect(!!(val), 0))
return 0;
return __builtin_expect(!!(atomic_try_cmpxchg(&lock->val, &val, (1U << 0))), 1);
}
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void queued_spin_lock(struct qspinlock *lock)
{
u32 val = 0;
if (__builtin_expect(!!(atomic_try_cmpxchg(&lock->val, &val, (1U << 0))), 1))
return;
queued_spin_lock_slowpath(lock, val);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void queued_spin_unlock(struct qspinlock *lock)
{
do { do { extern void __compiletime_assert_103(void) ; if (!((sizeof(*&lock->locked) == sizeof(char) || sizeof(*&lock->locked) == sizeof(short) || sizeof(*&lock->locked) == sizeof(int) || sizeof(*&lock->locked) == sizeof(long)))) __compiletime_assert_103(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&lock->locked) __val; char __c[1]; } __u = { .__val = ( typeof(*&lock->locked)) (0) }; __write_once_size(&(*&lock->locked), __u.__c, sizeof(*&lock->locked)); __u.__val; }); } while (0);
}
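/*
 * The do/while block above is likely smp_store_release(&lock->locked, 0)
 * after expansion: a compile-time size assertion, a compiler barrier and a
 * WRITE_ONCE() of zero to the locked byte, which is a full release on x86.
 */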
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool virt_spin_lock(struct qspinlock *lock)
{
return false;
}
# 96 "./arch/x86/include/asm/qspinlock.h" 2
# 28 "./arch/x86/include/asm/spinlock.h" 2
# 43 "./arch/x86/include/asm/spinlock.h"
# 1 "./arch/x86/include/asm/qrwlock.h" 1
# 1 "./include/asm-generic/qrwlock.h" 1
# 39 "./include/asm-generic/qrwlock.h"
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int queued_read_trylock(struct qrwlock *lock)
{
u32 cnts;
cnts = atomic_read(&lock->cnts);
if (__builtin_expect(!!(!(cnts & 0x1ff)), 1)) {
cnts = (u32)atomic_add_return((1U << 9), &lock->cnts);
if (__builtin_expect(!!(!(cnts & 0x1ff)), 1))
return 1;
atomic_sub((1U << 9), &lock->cnts);
}
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int queued_write_trylock(struct qrwlock *lock)
{
u32 cnts;
cnts = atomic_read(&lock->cnts);
if (__builtin_expect(!!(cnts), 0))
return 0;
return __builtin_expect(!!(atomic_try_cmpxchg(&lock->cnts, &cnts, 0x0ff)), 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void queued_read_lock(struct qrwlock *lock)
{
u32 cnts;
cnts = atomic_add_return((1U << 9), &lock->cnts);
if (__builtin_expect(!!(!(cnts & 0x1ff)), 1))
return;
queued_read_lock_slowpath(lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void queued_write_lock(struct qrwlock *lock)
{
u32 cnts = 0;
if (__builtin_expect(!!(atomic_try_cmpxchg(&lock->cnts, &cnts, 0x0ff)), 1))
return;
queued_write_lock_slowpath(lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void queued_read_unlock(struct qrwlock *lock)
{
(void)atomic_sub_return((1U << 9), &lock->cnts);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void queued_write_unlock(struct qrwlock *lock)
{
do { do { extern void __compiletime_assert_125(void) ; if (!((sizeof(*&lock->wlocked) == sizeof(char) || sizeof(*&lock->wlocked) == sizeof(short) || sizeof(*&lock->wlocked) == sizeof(int) || sizeof(*&lock->wlocked) == sizeof(long)))) __compiletime_assert_125(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&lock->wlocked) __val; char __c[1]; } __u = { .__val = ( typeof(*&lock->wlocked)) (0) }; __write_once_size(&(*&lock->wlocked), __u.__c, sizeof(*&lock->wlocked)); __u.__val; }); } while (0);
}
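/*
 * Write unlock mirrors queued_spin_unlock(): an expanded
 * smp_store_release(&lock->wlocked, 0). Read unlock above simply subtracts
 * the per-reader bias (1 << 9) from cnts.
 */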
# 7 "./arch/x86/include/asm/qrwlock.h" 2
# 44 "./arch/x86/include/asm/spinlock.h" 2
# 89 "./include/linux/spinlock.h" 2
# 177 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void do_raw_spin_lock(raw_spinlock_t *lock)
{
(void)0;
queued_spin_lock(&lock->raw_lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
{
(void)0;
queued_spin_lock(&lock->raw_lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int do_raw_spin_trylock(raw_spinlock_t *lock)
{
return queued_spin_trylock(&(lock)->raw_lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void do_raw_spin_unlock(raw_spinlock_t *lock)
{
queued_spin_unlock(&lock->raw_lock);
(void)0;
}
# 301 "./include/linux/spinlock.h"
# 1 "./include/linux/rwlock.h" 1
# 302 "./include/linux/spinlock.h" 2
# 1 "./include/linux/spinlock_api_smp.h" 1
# 18 "./include/linux/spinlock_api_smp.h"
int in_lock_functions(unsigned long addr);
void __attribute__((section(".spinlock.text"))) _raw_spin_lock(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
;
void __attribute__((section(".spinlock.text")))
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_bh(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_irq(raw_spinlock_t *lock)
;
unsigned long __attribute__((section(".spinlock.text"))) _raw_spin_lock_irqsave(raw_spinlock_t *lock)
;
unsigned long __attribute__((section(".spinlock.text")))
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
;
int __attribute__((section(".spinlock.text"))) _raw_spin_trylock(raw_spinlock_t *lock);
int __attribute__((section(".spinlock.text"))) _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_bh(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_irq(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text")))
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
;
# 86 "./include/linux/spinlock_api_smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __raw_spin_trylock(raw_spinlock_t *lock)
{
__asm__ __volatile__("" : : : "memory");
if (do_raw_spin_trylock(lock)) {
do { } while (0);
return 1;
}
__asm__ __volatile__("" : : : "memory");
return 0;
}
# 104 "./include/linux/spinlock_api_smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); } while (0);
__asm__ __volatile__("" : : : "memory");
do { } while (0);
# 119 "./include/linux/spinlock_api_smp.h"
do_raw_spin_lock_flags(lock, &flags);
return flags;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
do { arch_local_irq_disable(); } while (0);
__asm__ __volatile__("" : : : "memory");
do { } while (0);
do_raw_spin_lock(lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + 0));
do { } while (0);
do_raw_spin_lock(lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_spin_lock(raw_spinlock_t *lock)
{
__asm__ __volatile__("" : : : "memory");
do { } while (0);
do_raw_spin_lock(lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_spin_unlock(raw_spinlock_t *lock)
{
do { } while (0);
do_raw_spin_unlock(lock);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
do { } while (0);
do_raw_spin_unlock(lock);
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
do { } while (0);
do_raw_spin_unlock(lock);
do { arch_local_irq_enable(); } while (0);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
do { } while (0);
do_raw_spin_unlock(lock);
__local_bh_enable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + 0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + 0));
if (do_raw_spin_trylock(lock)) {
do { } while (0);
return 1;
}
__local_bh_enable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + 0));
return 0;
}
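/*
 * The empty do { } while (0) statements in these helpers appear to be
 * lockdep annotations (spin_acquire()/spin_release()) compiled out in this
 * configuration, and the bare "memory" clobbers stand in for
 * preempt_disable()/preempt_enable() in what looks like a build without
 * preemption counting; what remains is IRQ/BH management around the raw
 * qspinlock operations.
 */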
# 1 "./include/linux/rwlock_api_smp.h" 1
# 18 "./include/linux/rwlock_api_smp.h"
void __attribute__((section(".spinlock.text"))) _raw_read_lock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_lock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_lock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock_irq(rwlock_t *lock) ;
unsigned long __attribute__((section(".spinlock.text"))) _raw_read_lock_irqsave(rwlock_t *lock)
;
unsigned long __attribute__((section(".spinlock.text"))) _raw_write_lock_irqsave(rwlock_t *lock)
;
int __attribute__((section(".spinlock.text"))) _raw_read_trylock(rwlock_t *lock);
int __attribute__((section(".spinlock.text"))) _raw_write_trylock(rwlock_t *lock);
void __attribute__((section(".spinlock.text"))) _raw_read_unlock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_unlock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_unlock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text")))
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
;
void __attribute__((section(".spinlock.text")))
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
;
# 117 "./include/linux/rwlock_api_smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __raw_read_trylock(rwlock_t *lock)
{
__asm__ __volatile__("" : : : "memory");
if (queued_read_trylock(&(lock)->raw_lock)) {
do { } while (0);
return 1;
}
__asm__ __volatile__("" : : : "memory");
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __raw_write_trylock(rwlock_t *lock)
{
__asm__ __volatile__("" : : : "memory");
if (queued_write_trylock(&(lock)->raw_lock)) {
do { } while (0);
return 1;
}
__asm__ __volatile__("" : : : "memory");
return 0;
}
# 146 "./include/linux/rwlock_api_smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_read_lock(rwlock_t *lock)
{
__asm__ __volatile__("" : : : "memory");
do { } while (0);
do {(void)0; queued_read_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
unsigned long flags;
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); } while (0);
__asm__ __volatile__("" : : : "memory");
do { } while (0);
do {(void)0; queued_read_lock(&((lock))->raw_lock); } while (0);
return flags;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_read_lock_irq(rwlock_t *lock)
{
do { arch_local_irq_disable(); } while (0);
__asm__ __volatile__("" : : : "memory");
do { } while (0);
do {(void)0; queued_read_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_read_lock_bh(rwlock_t *lock)
{
__local_bh_disable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + 0));
do { } while (0);
do {(void)0; queued_read_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
unsigned long flags;
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); } while (0);
__asm__ __volatile__("" : : : "memory");
do { } while (0);
do {(void)0; queued_write_lock(&((lock))->raw_lock); } while (0);
return flags;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_write_lock_irq(rwlock_t *lock)
{
do { arch_local_irq_disable(); } while (0);
__asm__ __volatile__("" : : : "memory");
do { } while (0);
do {(void)0; queued_write_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_write_lock_bh(rwlock_t *lock)
{
__local_bh_disable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + 0));
do { } while (0);
do {(void)0; queued_write_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_write_lock(rwlock_t *lock)
{
__asm__ __volatile__("" : : : "memory");
do { } while (0);
do {(void)0; queued_write_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_write_unlock(rwlock_t *lock)
{
do { } while (0);
do {queued_write_unlock(&(lock)->raw_lock); (void)0; } while (0);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_read_unlock(rwlock_t *lock)
{
do { } while (0);
do {queued_read_unlock(&(lock)->raw_lock); (void)0; } while (0);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
do { } while (0);
do {queued_read_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_read_unlock_irq(rwlock_t *lock)
{
do { } while (0);
do {queued_read_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { arch_local_irq_enable(); } while (0);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_read_unlock_bh(rwlock_t *lock)
{
do { } while (0);
do {queued_read_unlock(&(lock)->raw_lock); (void)0; } while (0);
__local_bh_enable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + 0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
{
do { } while (0);
do {queued_write_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_write_unlock_irq(rwlock_t *lock)
{
do { } while (0);
do {queued_write_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { arch_local_irq_enable(); } while (0);
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __raw_write_unlock_bh(rwlock_t *lock)
{
do { } while (0);
do {queued_write_unlock(&(lock)->raw_lock); (void)0; } while (0);
__local_bh_enable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + 0));
}
# 191 "./include/linux/spinlock_api_smp.h" 2
# 308 "./include/linux/spinlock.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_lock(spinlock_t *lock)
{
_raw_spin_lock(&lock->rlock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_lock_bh(spinlock_t *lock)
{
_raw_spin_lock_bh(&lock->rlock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_trylock(spinlock_t *lock)
{
return (_raw_spin_trylock(&lock->rlock));
}
# 352 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_lock_irq(spinlock_t *lock)
{
_raw_spin_lock_irq(&lock->rlock);
}
# 367 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_unlock(spinlock_t *lock)
{
__raw_spin_unlock(&lock->rlock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_unlock_bh(spinlock_t *lock)
{
_raw_spin_unlock_bh(&lock->rlock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_unlock_irq(spinlock_t *lock)
{
__raw_spin_unlock_irq(&lock->rlock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&lock->rlock, flags); } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_trylock_bh(spinlock_t *lock)
{
return (_raw_spin_trylock_bh(&lock->rlock));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_trylock_irq(spinlock_t *lock)
{
return ({ do { arch_local_irq_disable(); } while (0); (_raw_spin_trylock(&lock->rlock)) ? 1 : ({ do { arch_local_irq_enable(); } while (0); 0; }); });
}
# 420 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_is_locked(spinlock_t *lock)
{
return queued_spin_is_locked(&(&lock->rlock)->raw_lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_is_contended(spinlock_t *lock)
{
return queued_spin_is_contended(&(&lock->rlock)->raw_lock);
}
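/*
 * A minimal usage sketch (not from the original source) for the spinlock_t
 * wrappers above, assuming a process-context caller and a hypothetical
 * example_lock/example_counter pair initialised elsewhere (e.g. with
 * spin_lock_init()):
 *
 *	static spinlock_t example_lock;
 *	static int example_counter;
 *
 *	spin_lock_irq(&example_lock);
 *	example_counter++;
 *	spin_unlock_irq(&example_lock);
 */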
# 445 "./include/linux/spinlock.h"
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
unsigned long *flags);
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp, const char *name,
struct lock_class_key *key);
# 469 "./include/linux/spinlock.h"
void free_bucket_spinlocks(spinlock_t *locks);
# 6 "./include/linux/ipc.h" 2
# 1 "./include/linux/uidgid.h" 1
# 16 "./include/linux/uidgid.h"
# 1 "./include/linux/highuid.h" 1
# 35 "./include/linux/highuid.h"
extern int overflowuid;
extern int overflowgid;
extern void __bad_uid(void);
extern void __bad_gid(void);
# 82 "./include/linux/highuid.h"
extern int fs_overflowuid;
extern int fs_overflowgid;
# 17 "./include/linux/uidgid.h" 2
struct user_namespace;
extern struct user_namespace init_user_ns;
typedef struct {
uid_t val;
} kuid_t;
typedef struct {
gid_t val;
} kgid_t;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) uid_t __kuid_val(kuid_t uid)
{
return uid.val;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gid_t __kgid_val(kgid_t gid)
{
return gid.val;
}
# 61 "./include/linux/uidgid.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_eq(kuid_t left, kuid_t right)
{
return __kuid_val(left) == __kuid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_eq(kgid_t left, kgid_t right)
{
return __kgid_val(left) == __kgid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_gt(kuid_t left, kuid_t right)
{
return __kuid_val(left) > __kuid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_gt(kgid_t left, kgid_t right)
{
return __kgid_val(left) > __kgid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_gte(kuid_t left, kuid_t right)
{
return __kuid_val(left) >= __kuid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_gte(kgid_t left, kgid_t right)
{
return __kgid_val(left) >= __kgid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_lt(kuid_t left, kuid_t right)
{
return __kuid_val(left) < __kuid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_lt(kgid_t left, kgid_t right)
{
return __kgid_val(left) < __kgid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_lte(kuid_t left, kuid_t right)
{
return __kuid_val(left) <= __kuid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_lte(kgid_t left, kgid_t right)
{
return __kgid_val(left) <= __kgid_val(right);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_valid(kuid_t uid)
{
return __kuid_val(uid) != (uid_t) -1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_valid(kgid_t gid)
{
return __kgid_val(gid) != (gid_t) -1;
}
# 143 "./include/linux/uidgid.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) kuid_t make_kuid(struct user_namespace *from, uid_t uid)
{
return (kuid_t){ uid };
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) kgid_t make_kgid(struct user_namespace *from, gid_t gid)
{
return (kgid_t){ gid };
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) uid_t from_kuid(struct user_namespace *to, kuid_t kuid)
{
return __kuid_val(kuid);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gid_t from_kgid(struct user_namespace *to, kgid_t kgid)
{
return __kgid_val(kgid);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid)
{
uid_t uid = from_kuid(to, kuid);
if (uid == (uid_t)-1)
uid = overflowuid;
return uid;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid)
{
gid_t gid = from_kgid(to, kgid);
if (gid == (gid_t)-1)
gid = overflowgid;
return gid;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
{
return uid_valid(uid);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
{
return gid_valid(gid);
}
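/*
 * With user namespaces apparently compiled out, kuid_t/kgid_t are thin
 * wrappers around uid_t/gid_t: make_kuid()/from_kuid() are identity
 * conversions and every valid id trivially "has a mapping".
 */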
# 7 "./include/linux/ipc.h" 2
# 1 "./include/linux/rhashtable-types.h" 1
# 14 "./include/linux/rhashtable-types.h"
# 1 "./include/linux/mutex.h" 1
# 20 "./include/linux/mutex.h"
# 1 "./include/linux/osq_lock.h" 1
struct optimistic_spin_node {
struct optimistic_spin_node *next, *prev;
int locked;
int cpu;
};
struct optimistic_spin_queue {
atomic_t tail;
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void osq_lock_init(struct optimistic_spin_queue *lock)
{
atomic_set(&lock->tail, (0));
}
extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool osq_is_locked(struct optimistic_spin_queue *lock)
{
return atomic_read(&lock->tail) != (0);
}
# 21 "./include/linux/mutex.h" 2
# 1 "./include/linux/debug_locks.h" 1
struct task_struct;
extern int debug_locks __attribute__((__section__(".data..read_mostly")));
extern int debug_locks_silent __attribute__((__section__(".data..read_mostly")));
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __debug_locks_off(void)
{
return ({ typeof(&debug_locks) __ai_ptr = (&debug_locks); kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = (((0))); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); });
}
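/*
 * The statement above appears to be xchg(&debug_locks, 0) after expansion:
 * an x86 xchg instruction (implicitly locked) that clears the flag and
 * returns its previous value.
 */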
extern int debug_locks_off(void);
# 49 "./include/linux/debug_locks.h"
struct task_struct;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void debug_show_all_locks(void)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void debug_show_held_locks(struct task_struct *task)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_check_no_locks_freed(const void *from, unsigned long len)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_check_no_locks_held(void)
{
}
# 22 "./include/linux/mutex.h" 2
struct ww_acquire_ctx;
# 53 "./include/linux/mutex.h"
struct mutex {
atomic_long_t owner;
spinlock_t wait_lock;
struct optimistic_spin_queue osq;
struct list_head wait_list;
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct task_struct *__mutex_owner(struct mutex *lock)
{
return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
}
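/*
 * The owner word stores the task_struct pointer of the current owner with
 * three low-order flag bits (waiters/handoff/pickup) packed in, hence the
 * ~0x07 mask above.
 */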
struct mutex_waiter {
struct list_head list;
struct task_struct *task;
struct ww_acquire_ctx *ww_ctx;
};
# 102 "./include/linux/mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mutex_destroy(struct mutex *lock) {}
# 138 "./include/linux/mutex.h"
extern void __mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mutex_is_locked(struct mutex *lock)
{
return __mutex_owner(lock) != ((void *)0);
}
# 178 "./include/linux/mutex.h"
extern void mutex_lock(struct mutex *lock);
extern int __attribute__((__warn_unused_result__)) mutex_lock_interruptible(struct mutex *lock);
extern int __attribute__((__warn_unused_result__)) mutex_lock_killable(struct mutex *lock);
extern void mutex_lock_io(struct mutex *lock);
# 196 "./include/linux/mutex.h"
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
enum mutex_trylock_recursive_enum {
MUTEX_TRYLOCK_FAILED = 0,
MUTEX_TRYLOCK_SUCCESS = 1,
MUTEX_TRYLOCK_RECURSIVE,
};
# 223 "./include/linux/mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
if (__builtin_expect(!!(__mutex_owner(lock) == get_current()), 0))
return MUTEX_TRYLOCK_RECURSIVE;
return mutex_trylock(lock);
}
# 15 "./include/linux/rhashtable-types.h" 2
# 1 "./include/linux/workqueue.h" 1
# 1 "./include/linux/timer.h" 1
# 1 "./include/linux/ktime.h" 1
# 24 "./include/linux/ktime.h"
# 1 "./include/linux/time.h" 1
# 1 "./include/linux/seqlock.h" 1
# 48 "./include/linux/seqlock.h"
typedef struct seqcount {
unsigned sequence;
} seqcount_t;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __seqcount_init(seqcount_t *s, const char *name,
struct lock_class_key *key)
{
do { (void)(name); (void)(key); } while (0);
s->sequence = 0;
}
# 108 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned __read_seqcount_begin(const seqcount_t *s)
{
unsigned ret;
repeat:
ret = ({ union { typeof(s->sequence) __val; char __c[1]; } __u; if (1) __read_once_size(&(s->sequence), __u.__c, sizeof(s->sequence)); else __read_once_size_nocheck(&(s->sequence), __u.__c, sizeof(s->sequence)); do { } while (0); __u.__val; });
if (__builtin_expect(!!(ret & 1), 0)) {
cpu_relax();
goto repeat;
}
return ret;
}
# 130 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = ({ union { typeof(s->sequence) __val; char __c[1]; } __u; if (1) __read_once_size(&(s->sequence), __u.__c, sizeof(s->sequence)); else __read_once_size_nocheck(&(s->sequence), __u.__c, sizeof(s->sequence)); do { } while (0); __u.__val; });
__asm__ __volatile__("" : : : "memory");
return ret;
}
# 146 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
unsigned ret = __read_seqcount_begin(s);
__asm__ __volatile__("" : : : "memory");
return ret;
}
# 162 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned read_seqcount_begin(const seqcount_t *s)
{
;
return raw_read_seqcount_begin(s);
}
# 182 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ({ union { typeof(s->sequence) __val; char __c[1]; } __u; if (1) __read_once_size(&(s->sequence), __u.__c, sizeof(s->sequence)); else __read_once_size_nocheck(&(s->sequence), __u.__c, sizeof(s->sequence)); do { } while (0); __u.__val; });
__asm__ __volatile__("" : : : "memory");
return ret & ~1;
}
# 203 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
return __builtin_expect(!!(s->sequence != start), 0);
}
# 218 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
__asm__ __volatile__("" : : : "memory");
return __read_seqcount_retry(s, start);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void raw_write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void raw_write_seqcount_end(seqcount_t *s)
{
__asm__ __volatile__("" : : : "memory");
s->sequence++;
}
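/*
 * seqcount protocol, as visible in the helpers around here: a writer bumps
 * the sequence to an odd value before updating and to an even value after,
 * with barriers on both sides; readers sample the sequence (the union-based
 * blocks are READ_ONCE() after expansion), spin while it is odd, and retry
 * the whole read if it changed.
 */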
# 272 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void raw_write_seqcount_barrier(seqcount_t *s)
{
s->sequence++;
__asm__ __volatile__("" : : : "memory");
s->sequence++;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int raw_read_seqcount_latch(seqcount_t *s)
{
int seq = ({ union { typeof(s->sequence) __val; char __c[1]; } __u; if (1) __read_once_size(&(s->sequence), __u.__c, sizeof(s->sequence)); else __read_once_size_nocheck(&(s->sequence), __u.__c, sizeof(s->sequence)); do { } while (0); __u.__val; });
return seq;
}
# 363 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void raw_write_seqcount_latch(seqcount_t *s)
{
__asm__ __volatile__("" : : : "memory");
s->sequence++;
__asm__ __volatile__("" : : : "memory");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
raw_write_seqcount_begin(s);
do { } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqcount_begin(seqcount_t *s)
{
write_seqcount_begin_nested(s, 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqcount_end(seqcount_t *s)
{
do { } while (0);
raw_write_seqcount_end(s);
}
# 398 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqcount_invalidate(seqcount_t *s)
{
__asm__ __volatile__("" : : : "memory");
s->sequence+=2;
}
typedef struct {
struct seqcount seqcount;
spinlock_t lock;
} seqlock_t;
# 431 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned read_seqbegin(const seqlock_t *sl)
{
return read_seqcount_begin(&sl->seqcount);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
return read_seqcount_retry(&sl->seqcount, start);
}
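/*
 * A minimal reader-side sketch (not from the original source) for the
 * seqlock_t wrappers above, assuming some shared state shared_val protected
 * by a seqlock example_sl:
 *
 *	unsigned int seq;
 *	int snapshot;
 *
 *	do {
 *		seq = read_seqbegin(&example_sl);
 *		snapshot = shared_val;
 *	} while (read_seqretry(&example_sl, seq));
 */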
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
write_seqcount_begin(&sl->seqcount);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_sequnlock(seqlock_t *sl)
{
write_seqcount_end(&sl->seqcount);
spin_unlock(&sl->lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
write_seqcount_begin(&sl->seqcount);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_sequnlock_bh(seqlock_t *sl)
{
write_seqcount_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
write_seqcount_begin(&sl->seqcount);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_sequnlock_irq(seqlock_t *sl)
{
write_seqcount_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
unsigned long flags;
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&sl->lock)); } while (0); } while (0);
write_seqcount_begin(&sl->seqcount);
return flags;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
write_seqcount_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_seqlock_excl(seqlock_t *sl)
{
spin_lock(&sl->lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_sequnlock_excl(seqlock_t *sl)
{
spin_unlock(&sl->lock);
}
# 526 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
if (!(*seq & 1))
*seq = read_seqbegin(lock);
else
read_seqlock_excl(lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int need_seqretry(seqlock_t *lock, int seq)
{
return !(seq & 1) && read_seqretry(lock, seq);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void done_seqretry(seqlock_t *lock, int seq)
{
if (seq & 1)
read_sequnlock_excl(lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_seqlock_excl_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_sequnlock_excl_bh(seqlock_t *sl)
{
spin_unlock_bh(&sl->lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_seqlock_excl_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_sequnlock_excl_irq(seqlock_t *sl)
{
spin_unlock_irq(&sl->lock);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
unsigned long flags;
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&sl->lock)); } while (0); } while (0);
return flags;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
spin_unlock_irqrestore(&sl->lock, flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
unsigned long flags = 0;
if (!(*seq & 1))
*seq = read_seqbegin(lock);
else
do { flags = __read_seqlock_excl_irqsave(lock); } while (0);
return flags;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
}
# 7 "./include/linux/time.h" 2
extern struct timezone sys_tz;
int get_timespec64(struct timespec64 *ts,
const struct timespec *uts);
int put_timespec64(const struct timespec64 *ts,
struct timespec *uts);
int get_itimerspec64(struct itimerspec64 *it,
const struct itimerspec *uit);
int put_itimerspec64(const struct itimerspec64 *it,
struct itimerspec *uit);
extern time64_t mktime64(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec);
# 38 "./include/linux/time.h"
struct itimerval;
extern int do_setitimer(int which, struct itimerval *value,
struct itimerval *ovalue);
extern int do_getitimer(int which, struct itimerval *value);
extern long do_utimes(int dfd, const char *filename, struct timespec64 *times, int flags);
struct tm {
int tm_sec;
int tm_min;
int tm_hour;
int tm_mday;
int tm_mon;
long tm_year;
int tm_wday;
int tm_yday;
};
void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);
# 1 "./include/linux/time32.h" 1
# 16 "./include/linux/time32.h"
typedef s32 old_time32_t;
struct old_timespec32 {
old_time32_t tv_sec;
s32 tv_nsec;
};
struct old_timeval32 {
old_time32_t tv_sec;
s32 tv_usec;
};
struct old_itimerspec32 {
struct old_timespec32 it_interval;
struct old_timespec32 it_value;
};
struct old_utimbuf32 {
old_time32_t actime;
old_time32_t modtime;
};
extern int get_old_timespec32(struct timespec64 *, const void *);
extern int put_old_timespec32(const struct timespec64 *, void *);
extern int get_old_itimerspec32(struct itimerspec64 *its,
const struct old_itimerspec32 *uits);
extern int put_old_itimerspec32(const struct itimerspec64 *its,
struct old_itimerspec32 *uits);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct timespec timespec64_to_timespec(const struct timespec64 ts64)
{
return *(const struct timespec *)&ts64;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct timespec64 timespec_to_timespec64(const struct timespec ts)
{
return *(const struct timespec64 *)&ts;
}
# 79 "./include/linux/time32.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int timespec_equal(const struct timespec *a,
const struct timespec *b)
{
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
if (lhs->tv_sec > rhs->tv_sec)
return 1;
return lhs->tv_nsec - rhs->tv_nsec;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timespec_valid(const struct timespec *ts)
{
if (ts->tv_sec < 0)
return false;
if ((unsigned long)ts->tv_nsec >= 1000000000L)
return false;
return true;
}
# 120 "./include/linux/time32.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 timespec_to_ns(const struct timespec *ts)
{
return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec;
}
extern struct timespec ns_to_timespec(const s64 nsec);
# 141 "./include/linux/time32.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void timespec_add_ns(struct timespec *a, u64 ns)
{
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
a->tv_nsec = ns;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long mktime(const unsigned int year,
const unsigned int mon, const unsigned int day,
const unsigned int hour, const unsigned int min,
const unsigned int sec)
{
return mktime64(year, mon, day, hour, min, sec);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timeval_valid(const struct timeval *tv)
{
if (tv->tv_sec < 0)
return false;
if (tv->tv_usec < 0 || tv->tv_usec >= 1000000L)
return false;
return true;
}
# 175 "./include/linux/time32.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 timeval_to_ns(const struct timeval *tv)
{
return ((s64) tv->tv_sec * 1000000000L) +
tv->tv_usec * 1000L;
}
extern struct timeval ns_to_timeval(const s64 nsec);
extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
# 74 "./include/linux/time.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool itimerspec64_valid(const struct itimerspec64 *its)
{
if (!timespec64_valid(&(its->it_interval)) ||
!timespec64_valid(&(its->it_value)))
return false;
return true;
}
# 25 "./include/linux/ktime.h" 2
# 1 "./include/linux/jiffies.h" 1
# 10 "./include/linux/jiffies.h"
# 1 "./include/linux/timex.h" 1
# 56 "./include/linux/timex.h"
# 1 "./include/uapi/linux/timex.h" 1
# 64 "./include/uapi/linux/timex.h"
struct timex {
unsigned int modes;
__kernel_long_t offset;
__kernel_long_t freq;
__kernel_long_t maxerror;
__kernel_long_t esterror;
int status;
__kernel_long_t constant;
__kernel_long_t precision;
__kernel_long_t tolerance;
struct timeval time;
__kernel_long_t tick;
__kernel_long_t ppsfreq;
__kernel_long_t jitter;
int shift;
__kernel_long_t stabil;
__kernel_long_t jitcnt;
__kernel_long_t calcnt;
__kernel_long_t errcnt;
__kernel_long_t stbcnt;
int tai;
int :32; int :32; int :32; int :32;
int :32; int :32; int :32; int :32;
int :32; int :32; int :32;
};
# 57 "./include/linux/timex.h" 2
# 1 "./include/uapi/linux/param.h" 1
# 1 "./arch/x86/include/uapi/asm/param.h" 1
# 1 "./include/asm-generic/param.h" 1
# 1 "./include/uapi/asm-generic/param.h" 1
# 6 "./include/asm-generic/param.h" 2
# 2 "./arch/x86/include/uapi/asm/param.h" 2
# 6 "./include/uapi/linux/param.h" 2
# 64 "./include/linux/timex.h" 2
# 1 "./arch/x86/include/asm/timex.h" 1
# 1 "./arch/x86/include/asm/tsc.h" 1
# 16 "./arch/x86/include/asm/tsc.h"
typedef unsigned long long cycles_t;
extern unsigned int cpu_khz;
extern unsigned int tsc_khz;
extern void disable_TSC(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) cycles_t get_cycles(void)
{
return rdtsc();
}
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
extern void tsc_early_init(void);
extern void tsc_init(void);
extern unsigned long calibrate_delay_is_known(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
extern int tsc_clocksource_reliable;
extern bool tsc_async_resets;
# 59 "./arch/x86/include/asm/tsc.h"
extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);
unsigned long cpu_khz_from_msr(void);
# 7 "./arch/x86/include/asm/timex.h" 2
# 66 "./include/linux/timex.h" 2
# 139 "./include/linux/timex.h"
extern unsigned long tick_usec;
extern unsigned long tick_nsec;
# 154 "./include/linux/timex.h"
extern int do_adjtimex(struct timex *);
extern void hardpps(const struct timespec64 *, const struct timespec64 *);
int read_current_timer(unsigned long *timer_val);
void ntp_notify_cmos_timer(void);
# 11 "./include/linux/jiffies.h" 2
# 1 "./arch/x86/include/uapi/asm/param.h" 1
# 12 "./include/linux/jiffies.h" 2
# 1 "./include/generated/timeconst.h" 1
# 13 "./include/linux/jiffies.h" 2
# 60 "./include/linux/jiffies.h"
extern int register_refined_jiffies(long clock_tick_rate);
# 80 "./include/linux/jiffies.h"
extern u64 __attribute__((__aligned__((1 << (6))), __section__(".data..cacheline_aligned"))) jiffies_64;
extern unsigned long volatile __attribute__((__aligned__((1 << (6))), __section__(".data..cacheline_aligned"))) jiffies;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 get_jiffies_64(void)
{
return (u64)jiffies;
}
# 190 "./include/linux/jiffies.h"
extern unsigned long preset_lpj;
# 291 "./include/linux/jiffies.h"
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * 1000L;
}
extern u64 jiffies64_to_nsecs(u64 j);
extern unsigned long __msecs_to_jiffies(const unsigned int m);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long _msecs_to_jiffies(const unsigned int m)
{
return (m + (1000L / 1000) - 1) / (1000L / 1000);
}
# 363 "./include/linux/jiffies.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long msecs_to_jiffies(const unsigned int m)
{
if (__builtin_constant_p(m)) {
if ((int)m < 0)
return ((((long)(~0UL>>1)) >> 1)-1);
return _msecs_to_jiffies(m);
} else {
return __msecs_to_jiffies(m);
}
}
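/*
 * Note on the expansion above: the literal ((((long)(~0UL>>1)) >> 1)-1) is
 * MAX_JIFFY_OFFSET, i.e. (LONG_MAX >> 1) - 1, and the (1000L / 1000) divisor
 * inside _msecs_to_jiffies() is MSEC_PER_SEC / HZ, which is consistent with
 * HZ == 1000 in this configuration (one millisecond per jiffy). A minimal,
 * hypothetical usage sketch:
 */
static inline unsigned long example_poll_interval(void)
{
        /* constant argument: the __builtin_constant_p() branch folds this at compile time */
        return msecs_to_jiffies(250);
}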
extern unsigned long __usecs_to_jiffies(const unsigned int u);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (u + (1000000L / 1000) - 1) / (1000000L / 1000);
}
# 410 "./include/linux/jiffies.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long usecs_to_jiffies(const unsigned int u)
{
if (__builtin_constant_p(u)) {
if (u > jiffies_to_usecs(((((long)(~0UL>>1)) >> 1)-1)))
return ((((long)(~0UL>>1)) >> 1)-1);
return _usecs_to_jiffies(u);
} else {
return __usecs_to_jiffies(u);
}
}
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long timespec_to_jiffies(const struct timespec *value)
{
struct timespec64 ts = timespec_to_timespec64(*value);
return timespec64_to_jiffies(&ts);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value)
{
struct timespec64 ts;
jiffies_to_timespec64(jiffies, &ts);
*value = timespec64_to_timespec(ts);
}
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
extern clock_t jiffies_to_clock_t(unsigned long x);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(__builtin_choose_expr(((!!(sizeof((typeof(0L) *)1 == (typeof(delta) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(0L) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(delta) * 0l)) : (int *)8))))), ((0L) > (delta) ? (0L) : (delta)), ({ typeof(0L) __UNIQUE_ID___x2 = (0L); typeof(delta) __UNIQUE_ID___y3 = (delta); ((__UNIQUE_ID___x2) > (__UNIQUE_ID___y3) ? (__UNIQUE_ID___x2) : (__UNIQUE_ID___y3)); })));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int jiffies_delta_to_msecs(long delta)
{
return jiffies_to_msecs(__builtin_choose_expr(((!!(sizeof((typeof(0L) *)1 == (typeof(delta) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(0L) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(delta) * 0l)) : (int *)8))))), ((0L) > (delta) ? (0L) : (delta)), ({ typeof(0L) __UNIQUE_ID___x4 = (0L); typeof(delta) __UNIQUE_ID___y5 = (delta); ((__UNIQUE_ID___x4) > (__UNIQUE_ID___y5) ? (__UNIQUE_ID___x4) : (__UNIQUE_ID___y5)); })));
}
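/*
 * Note: the __builtin_choose_expr()/__UNIQUE_ID statement expressions inside
 * jiffies_delta_to_clock_t() and jiffies_delta_to_msecs() above are the
 * expansion of max(0L, delta), clamping a possibly negative jiffies delta to
 * zero before conversion.
 */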
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
extern u64 nsecs_to_jiffies64(u64 n);
extern unsigned long nsecs_to_jiffies(u64 n);
# 26 "./include/linux/ktime.h" 2
typedef s64 ktime_t;
# 37 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
if (__builtin_expect(!!(secs >= (((s64)~((u64)1 << 63)) / 1000000000L)), 0))
return ((s64)~((u64)1 << 63));
return secs * 1000000000L + (s64)nsecs;
}
# 70 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t timespec_to_ktime(struct timespec ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t timespec64_to_ktime(struct timespec64 ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t timeval_to_ktime(struct timeval tv)
{
return ktime_set(tv.tv_sec, tv.tv_usec * 1000L);
}
# 97 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_to_ns(const ktime_t kt)
{
return kt;
}
# 112 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
if (cmp1 < cmp2)
return -1;
if (cmp1 > cmp2)
return 1;
return 0;
}
# 128 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
return ktime_compare(cmp1, cmp2) > 0;
}
# 140 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
return ktime_compare(cmp1, cmp2) < 0;
}
# 165 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_divns(const ktime_t kt, s64 div)
{
({ int __ret_warn_on = !!(div < 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("./include/linux/ktime.h"), "i" (171), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (6)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return kt / div;
}
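/*
 * Note: the large inline-asm statement at the top of ktime_divns() above is
 * the expansion of WARN_ON(div < 0); the actual division is the plain
 * "kt / div" on the following line.
 */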
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_to_us(const ktime_t kt)
{
return ktime_divns(kt, 1000L);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_to_ms(const ktime_t kt)
{
return ktime_divns(kt, 1000000L);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_us(((later) - (earlier)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_ms(((later) - (earlier)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
return ((kt) + (usec * 1000L));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
return ((kt) + (msec * 1000000L));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
return ((kt) - (usec * 1000L));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
{
return ((kt) - (msec * 1000000L));
}
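/*
 * Sketch (hypothetical helper, not part of the headers above): deadline
 * arithmetic built from the ktime_t operations declared earlier.
 */
static inline bool example_deadline_expired(ktime_t now, ktime_t start, u64 budget_ms)
{
        ktime_t deadline = ktime_add_ms(start, budget_ms);

        return ktime_compare(now, deadline) > 0;        /* equivalent to ktime_after(now, deadline) */
}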
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
# 226 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) bool ktime_to_timespec_cond(const ktime_t kt,
struct timespec *ts)
{
if (kt) {
*ts = ns_to_timespec((kt));
return true;
} else {
return false;
}
}
# 245 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) bool ktime_to_timespec64_cond(const ktime_t kt,
struct timespec64 *ts)
{
if (kt) {
*ts = ns_to_timespec64((kt));
return true;
} else {
return false;
}
}
# 265 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ns_to_ktime(u64 ns)
{
return ns;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ms_to_ktime(u64 ms)
{
return ms * 1000000L;
}
# 1 "./include/linux/timekeeping.h" 1
void timekeeping_init(void);
extern int timekeeping_suspended;
extern void update_process_times(int user);
extern void xtime_update(unsigned long ticks);
extern int do_settimeofday64(const struct timespec64 *ts);
extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
# 41 "./include/linux/timekeeping.h"
extern void ktime_get_raw_ts64(struct timespec64 *ts);
extern void ktime_get_ts64(struct timespec64 *ts);
extern void ktime_get_real_ts64(struct timespec64 *tv);
extern void ktime_get_coarse_ts64(struct timespec64 *ts);
extern void ktime_get_coarse_real_ts64(struct timespec64 *ts);
void getboottime64(struct timespec64 *ts);
extern time64_t ktime_get_seconds(void);
extern time64_t __ktime_get_real_seconds(void);
extern time64_t ktime_get_real_seconds(void);
enum tk_offsets {
TK_OFFS_REAL,
TK_OFFS_BOOT,
TK_OFFS_TAI,
TK_OFFS_MAX,
};
extern ktime_t ktime_get(void);
extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
extern ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs);
extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
extern u32 ktime_get_resolution_ns(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_real(void)
{
return ktime_get_with_offset(TK_OFFS_REAL);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_coarse_real(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_REAL);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_boottime(void)
{
return ktime_get_with_offset(TK_OFFS_BOOT);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_coarse_boottime(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_BOOT);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_clocktai(void)
{
return ktime_get_with_offset(TK_OFFS_TAI);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_coarse_clocktai(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_TAI);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_mono_to_real(ktime_t mono)
{
return ktime_mono_to_any(mono, TK_OFFS_REAL);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_ns(void)
{
return ktime_to_ns(ktime_get());
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_real_ns(void)
{
return ktime_to_ns(ktime_get_real());
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_boot_ns(void)
{
return ktime_to_ns(ktime_get_boottime());
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_tai_ns(void)
{
return ktime_to_ns(ktime_get_clocktai());
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_raw_ns(void)
{
return ktime_to_ns(ktime_get_raw());
}
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
extern u64 ktime_get_boot_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);
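/*
 * Sketch: timing a callback with the monotonic clock accessors declared
 * above. example_measure_us() and the fn argument are hypothetical.
 */
static inline s64 example_measure_us(void (*fn)(void))
{
        ktime_t start = ktime_get();    /* CLOCK_MONOTONIC as a nanosecond ktime_t */

        fn();
        return ktime_us_delta(ktime_get(), start);
}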
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_boottime_ts64(struct timespec64 *ts)
{
*ts = ns_to_timespec64((ktime_get_boottime()));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_coarse_boottime_ts64(struct timespec64 *ts)
{
*ts = ns_to_timespec64((ktime_get_coarse_boottime()));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) time64_t ktime_get_boottime_seconds(void)
{
return ktime_divns(ktime_get_coarse_boottime(), 1000000000L);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_clocktai_ts64(struct timespec64 *ts)
{
*ts = ns_to_timespec64((ktime_get_clocktai()));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_coarse_clocktai_ts64(struct timespec64 *ts)
{
*ts = ns_to_timespec64((ktime_get_coarse_clocktai()));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) time64_t ktime_get_clocktai_seconds(void)
{
return ktime_divns(ktime_get_coarse_clocktai(), 1000000000L);
}
extern bool timekeeping_rtc_skipsuspend(void);
extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
# 206 "./include/linux/timekeeping.h"
struct system_time_snapshot {
u64 cycles;
ktime_t real;
ktime_t raw;
unsigned int clock_was_set_seq;
u8 cs_was_changed_seq;
};
# 221 "./include/linux/timekeeping.h"
struct system_device_crosststamp {
ktime_t device;
ktime_t sys_realtime;
ktime_t sys_monoraw;
};
# 234 "./include/linux/timekeeping.h"
struct system_counterval_t {
u64 cycles;
struct clocksource *cs;
};
extern int get_device_system_crosststamp(
int (*get_time_fn)(ktime_t *device_time,
struct system_counterval_t *system_counterval,
void *ctx),
void *ctx,
struct system_time_snapshot *history,
struct system_device_crosststamp *xtstamp);
extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
# 276 "./include/linux/ktime.h" 2
# 1 "./include/linux/timekeeping32.h" 1
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_seconds(void)
{
return ktime_get_real_seconds();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void getnstimeofday(struct timespec *ts)
{
struct timespec64 ts64;
ktime_get_real_ts64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_ts(struct timespec *ts)
{
struct timespec64 ts64;
ktime_get_ts64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void getrawmonotonic(struct timespec *ts)
{
struct timespec64 ts64;
ktime_get_raw_ts64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void getboottime(struct timespec *ts)
{
struct timespec64 ts64;
getboottime64(&ts64);
*ts = timespec64_to_timespec(ts64);
}
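/*
 * The timekeeping32.h wrappers above merely adapt the y2038-safe 64-bit
 * interfaces to the legacy struct timespec; new code calls the 64-bit
 * variants directly. Hypothetical sketch:
 */
static inline time64_t example_wall_seconds(void)
{
        struct timespec64 now;

        ktime_get_real_ts64(&now);      /* CLOCK_REALTIME as a timespec64 */
        return now.tv_sec;
}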
# 277 "./include/linux/ktime.h" 2
# 7 "./include/linux/timer.h" 2
# 1 "./include/linux/debugobjects.h" 1
enum debug_obj_state {
ODEBUG_STATE_NONE,
ODEBUG_STATE_INIT,
ODEBUG_STATE_INACTIVE,
ODEBUG_STATE_ACTIVE,
ODEBUG_STATE_DESTROYED,
ODEBUG_STATE_NOTAVAILABLE,
ODEBUG_STATE_MAX,
};
struct debug_obj_descr;
# 28 "./include/linux/debugobjects.h"
struct debug_obj {
struct hlist_node node;
enum debug_obj_state state;
unsigned int astate;
void *object;
struct debug_obj_descr *descr;
};
# 55 "./include/linux/debugobjects.h"
struct debug_obj_descr {
const char *name;
void *(*debug_hint)(void *addr);
bool (*is_static_object)(void *addr);
bool (*fixup_init)(void *addr, enum debug_obj_state state);
bool (*fixup_activate)(void *addr, enum debug_obj_state state);
bool (*fixup_destroy)(void *addr, enum debug_obj_state state);
bool (*fixup_free)(void *addr, enum debug_obj_state state);
bool (*fixup_assert_init)(void *addr, enum debug_obj_state state);
};
# 88 "./include/linux/debugobjects.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_object_init (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int
debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_object_free (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void debug_objects_early_init(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void debug_objects_mem_init(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
debug_check_no_obj_freed(const void *address, unsigned long size) { }
# 9 "./include/linux/timer.h" 2
struct timer_list {
struct hlist_node entry;
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
};
# 79 "./include/linux/timer.h"
void init_timer_key(struct timer_list *timer,
void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_timer_on_stack_key(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name,
struct lock_class_key *key)
{
init_timer_key(timer, func, flags, name, key);
}
# 138 "./include/linux/timer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void destroy_timer_on_stack(struct timer_list *timer) { }
# 154 "./include/linux/timer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int timer_pending(const struct timer_list * timer)
{
return timer->entry.pprev != ((void *)0);
}
extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int timer_reduce(struct timer_list *timer, unsigned long expires);
extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
extern int del_timer_sync(struct timer_list *timer);
extern void init_timers(void);
extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
struct ctl_table;
extern unsigned int sysctl_timer_migration;
int timer_migration_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos);
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
int timers_prepare_cpu(unsigned int cpu);
int timers_dead_cpu(unsigned int cpu);
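/*
 * Sketch: arming the low-resolution timer declared above. The usual
 * timer_setup() helper is a macro and therefore already expanded away in
 * this preprocessed dump; init_timer_key() is the function underneath it,
 * and a NULL lock_class_key is assumed here (lockdep-less configuration).
 * example_timer, example_timer_fn and start_example_timer are hypothetical.
 */
static struct timer_list example_timer;

static void example_timer_fn(struct timer_list *t)
{
        /* runs in softirq context once the timeout expires */
}

static void start_example_timer(void)
{
        init_timer_key(&example_timer, example_timer_fn, 0, "example_timer", ((void *)0));
        mod_timer(&example_timer, jiffies + msecs_to_jiffies(100));    /* fire in ~100ms */
        /* del_timer_sync(&example_timer) must run before example_timer can be freed */
}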
# 10 "./include/linux/workqueue.h" 2
struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);
enum {
WORK_STRUCT_PENDING_BIT = 0,
WORK_STRUCT_DELAYED_BIT = 1,
WORK_STRUCT_PWQ_BIT = 2,
WORK_STRUCT_LINKED_BIT = 3,
WORK_STRUCT_COLOR_SHIFT = 4,
WORK_STRUCT_COLOR_BITS = 4,
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
WORK_STRUCT_STATIC = 0,
WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
WORK_NO_COLOR = WORK_NR_COLORS,
WORK_CPU_UNBOUND = 64,
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
__WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
WORK_OFFQ_FLAG_BITS = 1,
WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
WORK_OFFQ_LEFT = 64 - WORK_OFFQ_POOL_SHIFT,
WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,
WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
WORK_BUSY_PENDING = 1 << 0,
WORK_BUSY_RUNNING = 1 << 1,
WORKER_DESC_LEN = 24,
};
struct work_struct {
atomic_long_t data;
struct list_head entry;
work_func_t func;
};
struct delayed_work {
struct work_struct work;
struct timer_list timer;
struct workqueue_struct *wq;
int cpu;
};
struct rcu_work {
struct work_struct work;
struct callback_head rcu;
struct workqueue_struct *wq;
};
struct workqueue_attrs {
int nice;
cpumask_var_t cpumask;
# 155 "./include/linux/workqueue.h"
bool no_numa;
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct delayed_work *to_delayed_work(struct work_struct *work)
{
return ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_160(void) ; if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct delayed_work *)0)->work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_160(); } while (0); ((struct delayed_work *)(__mptr - __builtin_offsetof(struct delayed_work, work))); });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rcu_work *to_rcu_work(struct work_struct *work)
{
return ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_165(void) ; if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct rcu_work *)0)->work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_165(); } while (0); ((struct rcu_work *)(__mptr - __builtin_offsetof(struct rcu_work, work))); });
}
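/*
 * Note: the ({ void *__mptr = ...; ... }) statement expressions in
 * to_delayed_work() and to_rcu_work() above are the expansion of
 * container_of(work, struct delayed_work, work) and
 * container_of(work, struct rcu_work, work) respectively, including the
 * compile-time type check.
 */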
struct execute_work {
struct work_struct work;
};
# 215 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __init_work(struct work_struct *work, int onstack) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void destroy_work_on_stack(struct work_struct *work) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int work_static(struct work_struct *work) { return 0; }
# 308 "./include/linux/workqueue.h"
enum {
WQ_UNBOUND = 1 << 1,
WQ_FREEZABLE = 1 << 2,
WQ_MEM_RECLAIM = 1 << 3,
WQ_HIGHPRI = 1 << 4,
WQ_CPU_INTENSIVE = 1 << 5,
WQ_SYSFS = 1 << 6,
# 341 "./include/linux/workqueue.h"
WQ_POWER_EFFICIENT = 1 << 7,
__WQ_DRAINING = 1 << 16,
__WQ_ORDERED = 1 << 17,
__WQ_LEGACY = 1 << 18,
__WQ_ORDERED_EXPLICIT = 1 << 19,
WQ_MAX_ACTIVE = 512,
WQ_MAX_UNBOUND_PER_CPU = 4,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
# 385 "./include/linux/workqueue.h"
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
struct lock_class_key *key, const char *lock_name, ...) __attribute__((__format__(printf, 1, 6)));
# 456 "./include/linux/workqueue.h"
extern void destroy_workqueue(struct workqueue_struct *wq);
struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern bool flush_rcu_work(struct rcu_work *rwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __attribute__((__format__(printf, 1, 2))) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
# 509 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool queue_work(struct workqueue_struct *wq,
struct work_struct *work)
{
return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
# 523 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
# 538 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mod_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
# 552 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool schedule_work_on(int cpu, struct work_struct *work)
{
return queue_work_on(cpu, system_wq, work);
}
# 568 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool schedule_work(struct work_struct *work)
{
return queue_work(system_wq, work);
}
# 597 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_scheduled_work(void)
{
flush_workqueue(system_wq);
}
# 611 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
# 625 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(system_wq, dwork, delay);
}
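/*
 * Sketch: deferring work to the shared system_wq through the wrappers above.
 * example_work, example_work_fn and example_kick_work are hypothetical, and
 * the INIT_WORK()/DECLARE_WORK() initialisers are macros that are no longer
 * visible in this preprocessed output, so initialisation is only noted in a
 * comment.
 */
static struct work_struct example_work;         /* assume INIT_WORK(&example_work, example_work_fn) ran at setup */

static void example_work_fn(struct work_struct *work)
{
        /* executes later, in process context, on a kworker thread */
}

static void example_kick_work(void)
{
        schedule_work(&example_work);           /* queue_work(system_wq, ...) per the wrapper above */
}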
# 641 "./include/linux/workqueue.h"
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
int workqueue_sysfs_register(struct workqueue_struct *wq);
# 661 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wq_watchdog_touch(int cpu) { }
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) workqueue_init_early(void);
int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) workqueue_init(void);
# 16 "./include/linux/rhashtable-types.h" 2
struct rhash_head {
struct rhash_head *next;
};
struct rhlist_head {
struct rhash_head rhead;
struct rhlist_head *next;
};
struct bucket_table;
struct rhashtable_compare_arg {
struct rhashtable *ht;
const void *key;
};
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
const void *obj);
# 57 "./include/linux/rhashtable-types.h"
struct rhashtable_params {
u16 nelem_hint;
u16 key_len;
u16 key_offset;
u16 head_offset;
unsigned int max_size;
u16 min_size;
bool automatic_shrinking;
u8 locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
rht_obj_cmpfn_t obj_cmpfn;
};
# 83 "./include/linux/rhashtable-types.h"
struct rhashtable {
struct bucket_table *tbl;
unsigned int key_len;
unsigned int max_elems;
struct rhashtable_params p;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
atomic_t nelems;
};
struct rhltable {
struct rhashtable ht;
};
struct rhashtable_walker {
struct list_head list;
struct bucket_table *tbl;
};
# 122 "./include/linux/rhashtable-types.h"
struct rhashtable_iter {
struct rhashtable *ht;
struct rhash_head *p;
struct rhlist_head *list;
struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
bool end_of_table;
};
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt,
const struct rhashtable_params *params);
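/*
 * Sketch: a minimal rhashtable keyed by a u32, wired up through the
 * rhashtable_params layout above. struct example_obj and example_params are
 * hypothetical; offsetof() is already expanded to __builtin_offsetof()
 * throughout this dump, so the same spelling is used here. With ->hashfn
 * left unset, rhashtable falls back to its built-in default hash.
 */
struct example_obj {
        u32 key;
        struct rhash_head node;                 /* linkage owned by the table */
};

static const struct rhashtable_params example_params = {
        .key_len     = sizeof(u32),
        .key_offset  = __builtin_offsetof(struct example_obj, key),
        .head_offset = __builtin_offsetof(struct example_obj, node),
        .automatic_shrinking = true,
};
/* rhashtable_init(&some_table, &example_params) then prepares the table for use. */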
# 8 "./include/linux/ipc.h" 2
# 1 "./include/uapi/linux/ipc.h" 1
# 10 "./include/uapi/linux/ipc.h"
struct ipc_perm
{
__kernel_key_t key;
__kernel_uid_t uid;
__kernel_gid_t gid;
__kernel_uid_t cuid;
__kernel_gid_t cgid;
__kernel_mode_t mode;
unsigned short seq;
};
# 1 "./arch/x86/include/uapi/asm/ipcbuf.h" 1
# 1 "./include/uapi/asm-generic/ipcbuf.h" 1
# 20 "./include/uapi/asm-generic/ipcbuf.h"
struct ipc64_perm {
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;
unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
unsigned short seq;
unsigned short __pad2;
__kernel_ulong_t __unused1;
__kernel_ulong_t __unused2;
};
# 2 "./arch/x86/include/uapi/asm/ipcbuf.h" 2
# 23 "./include/uapi/linux/ipc.h" 2
# 58 "./include/uapi/linux/ipc.h"
struct ipc_kludge {
struct msgbuf *msgp;
long msgtyp;
};
# 9 "./include/linux/ipc.h" 2
# 1 "./include/linux/refcount.h" 1
struct mutex;
# 19 "./include/linux/refcount.h"
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
# 30 "./include/linux/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void refcount_set(refcount_t *r, unsigned int n)
{
atomic_set(&r->refs, n);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int refcount_read(const refcount_t *r)
{
return atomic_read(&r->refs);
}
extern __attribute__((__warn_unused_result__)) bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
extern void refcount_add_checked(unsigned int i, refcount_t *r);
extern __attribute__((__warn_unused_result__)) bool refcount_inc_not_zero_checked(refcount_t *r);
extern void refcount_inc_checked(refcount_t *r);
extern __attribute__((__warn_unused_result__)) bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_and_test_checked(refcount_t *r);
extern void refcount_dec_checked(refcount_t *r);
# 72 "./include/linux/refcount.h"
# 1 "./arch/x86/include/asm/refcount.h" 1
# 1 "./include/linux/refcount.h" 1
# 8 "./arch/x86/include/asm/refcount.h" 2
# 42 "./arch/x86/include/asm/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void refcount_add(unsigned int i, refcount_t *r)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "addl %1,%0\n\t"
"js 111f\n\t" ".pushsection .text..refcount\n" "111:\tlea %[var], %%" "rcx" "\n" "112:\t" ".byte 0x0f, 0x0b" "\n" "999:\n\t" ".pushsection .discard.unreachable\n\t" ".long 999b - .\n\t" ".popsection\n\t" ".popsection\n" "113:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "112b" ") - .\n" " .long (" "113b" ") - .\n" " .long (" "ex_handler_refcount" ") - .\n" " .popsection\n"
: [var] "+m" (r->refs.counter)
: "ir" (i)
: "cc", "cx");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void refcount_inc(refcount_t *r)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "incl %0\n\t"
"js 111f\n\t" ".pushsection .text..refcount\n" "111:\tlea %[var], %%" "rcx" "\n" "112:\t" ".byte 0x0f, 0x0b" "\n" "999:\n\t" ".pushsection .discard.unreachable\n\t" ".long 999b - .\n\t" ".popsection\n\t" ".popsection\n" "113:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "112b" ") - .\n" " .long (" "113b" ") - .\n" " .long (" "ex_handler_refcount" ") - .\n" " .popsection\n"
: [var] "+m" (r->refs.counter)
: : "cc", "cx");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void refcount_dec(refcount_t *r)
{
asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "decl %0\n\t"
"jz 111f\n\t" "js 111f\n\t" ".pushsection .text..refcount\n" "111:\tlea %[var], %%" "rcx" "\n" "112:\t" ".byte 0x0f, 0x0b" "\n" "999:\n\t" ".pushsection .discard.unreachable\n\t" ".long 999b - .\n\t" ".popsection\n\t" ".popsection\n" "113:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "112b" ") - .\n" " .long (" "113b" ") - .\n" " .long (" "ex_handler_refcount" ") - .\n" " .popsection\n"
: [var] "+m" (r->refs.counter)
: : "cc", "cx");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__))
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "subl" " %[val], %[var]\n\t" "js 111f\n\t" ".pushsection .text..refcount\n" "111:\tlea %[var], %%" "rcx" "\n" "112:\t" ".byte 0x0f, 0x0b" "\n" "999:\n\t" ".pushsection .discard.unreachable\n\t" ".long 999b - .\n\t" ".popsection\n\t" ".popsection\n" "113:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "112b" ") - .\n" " .long (" "113b" ") - .\n" " .long (" "ex_handler_refcount" ") - .\n" " .popsection\n" "; j" "e" " %l[cc_label]" : : [var] "m" (r->refs.counter), [val] "er" (i) : "memory","cx" : cc_label); if (0) { cc_label: c = true; } c; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) bool refcount_dec_and_test(refcount_t *r)
{
return ({ bool c = false; asm goto(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "decl" " %[var]\n\t" "js 111f\n\t" ".pushsection .text..refcount\n" "111:\tlea %[var], %%" "rcx" "\n" "112:\t" ".byte 0x0f, 0x0b" "\n" "999:\n\t" ".pushsection .discard.unreachable\n\t" ".long 999b - .\n\t" ".popsection\n\t" ".popsection\n" "113:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "112b" ") - .\n" " .long (" "113b" ") - .\n" " .long (" "ex_handler_refcount" ") - .\n" " .popsection\n" "; j" "e" " %l[cc_label]" : : [var] "m" (r->refs.counter) : "memory","cx" : cc_label); if (0) { cc_label: c = true; } c; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__))
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
int c, result;
c = atomic_read(&(r->refs));
do {
if (__builtin_expect(!!(c == 0), 0))
return false;
result = c + i;
if (__builtin_expect(!!(c < 0 || c == ((int)(~0U>>1)) || result < c), 0)) {
asm volatile("jmp 111f\n\t" ".pushsection .text..refcount\n" "111:\tlea %[var], %%" "rcx" "\n" "112:\t" ".byte 0x0f, 0x0b" "\n" "999:\n\t" ".pushsection .discard.unreachable\n\t" ".long 999b - .\n\t" ".popsection\n\t" ".popsection\n" "113:\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "112b" ") - .\n" " .long (" "113b" ") - .\n" " .long (" "ex_handler_refcount" ") - .\n" " .popsection\n"
: : [var] "m" (r->refs.counter)
: "cc", "cx");
break;
}
} while (!atomic_try_cmpxchg(&(r->refs), &c, result));
return c != 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) bool refcount_inc_not_zero(refcount_t *r)
{
return refcount_add_not_zero(1, r);
}
# 73 "./include/linux/refcount.h" 2
# 111 "./include/linux/refcount.h"
extern __attribute__((__warn_unused_result__)) bool refcount_dec_if_one(refcount_t *r);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_not_one(refcount_t *r);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
unsigned long *flags);
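/*
 * Note: the "js 111f" / .text..refcount inline asm in the x86 refcount
 * implementation above saturates on overflow or underflow by trapping to
 * ex_handler_refcount instead of letting the counter wrap. Hypothetical
 * get/put sketch on top of it:
 */
struct example_ref {
        refcount_t refs;
};

static inline void example_ref_init(struct example_ref *e)
{
        refcount_set(&e->refs, 1);              /* object starts with one owner */
}

static inline void example_get(struct example_ref *e)
{
        refcount_inc(&e->refs);
}

static inline bool example_put(struct example_ref *e)
{
        if (refcount_dec_and_test(&e->refs)) {
                /* last reference gone: the caller frees the object here */
                return true;
        }
        return false;
}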
# 10 "./include/linux/ipc.h" 2
struct kern_ipc_perm {
spinlock_t lock;
bool deleted;
int id;
key_t key;
kuid_t uid;
kgid_t gid;
kuid_t cuid;
kgid_t cgid;
umode_t mode;
unsigned long seq;
void *security;
struct rhash_head khtnode;
struct callback_head rcu;
refcount_t refcount;
} __attribute__((__aligned__((1 << (6))))) ;
# 6 "./include/uapi/linux/sem.h" 2
# 25 "./include/uapi/linux/sem.h"
struct semid_ds {
struct ipc_perm sem_perm;
__kernel_time_t sem_otime;
__kernel_time_t sem_ctime;
struct sem *sem_base;
struct sem_queue *sem_pending;
struct sem_queue **sem_pending_last;
struct sem_undo *undo;
unsigned short sem_nsems;
};
# 1 "./arch/x86/include/uapi/asm/sembuf.h" 1
# 16 "./arch/x86/include/uapi/asm/sembuf.h"
struct semid64_ds {
struct ipc64_perm sem_perm;
__kernel_time_t sem_otime;
__kernel_ulong_t __unused1;
__kernel_time_t sem_ctime;
__kernel_ulong_t __unused2;
__kernel_ulong_t sem_nsems;
__kernel_ulong_t __unused3;
__kernel_ulong_t __unused4;
};
# 38 "./include/uapi/linux/sem.h" 2
struct sembuf {
unsigned short sem_num;
short sem_op;
short sem_flg;
};
union semun {
int val;
struct semid_ds *buf;
unsigned short *array;
struct seminfo *__buf;
void *__pad;
};
struct seminfo {
int semmap;
int semmni;
int semmns;
int semmnu;
int semmsl;
int semopm;
int semume;
int semusz;
int semvmx;
int semaem;
};
# 6 "./include/linux/sem.h" 2
struct task_struct;
struct sem_undo_list;
struct sysv_sem {
struct sem_undo_list *undo_list;
};
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
# 16 "./include/linux/sched.h" 2
# 1 "./include/linux/shm.h" 1
# 1 "./include/uapi/linux/shm.h" 1
# 1 "./include/uapi/asm-generic/hugetlb_encode.h" 1
# 8 "./include/uapi/linux/shm.h" 2
# 28 "./include/uapi/linux/shm.h"
struct shmid_ds {
struct ipc_perm shm_perm;
int shm_segsz;
__kernel_time_t shm_atime;
__kernel_time_t shm_dtime;
__kernel_time_t shm_ctime;
__kernel_ipc_pid_t shm_cpid;
__kernel_ipc_pid_t shm_lpid;
unsigned short shm_nattch;
unsigned short shm_unused;
void *shm_unused2;
void *shm_unused3;
};
# 1 "./arch/x86/include/uapi/asm/shmbuf.h" 1
# 1 "./include/uapi/asm-generic/shmbuf.h" 1
# 25 "./include/uapi/asm-generic/shmbuf.h"
struct shmid64_ds {
struct ipc64_perm shm_perm;
size_t shm_segsz;
__kernel_time_t shm_atime;
__kernel_time_t shm_dtime;
__kernel_time_t shm_ctime;
# 40 "./include/uapi/asm-generic/shmbuf.h"
__kernel_pid_t shm_cpid;
__kernel_pid_t shm_lpid;
unsigned long shm_nattch;
unsigned long __unused4;
unsigned long __unused5;
};
struct shminfo64 {
unsigned long shmmax;
unsigned long shmmin;
unsigned long shmmni;
unsigned long shmseg;
unsigned long shmall;
unsigned long __unused1;
unsigned long __unused2;
unsigned long __unused3;
unsigned long __unused4;
};
# 7 "./arch/x86/include/uapi/asm/shmbuf.h" 2
# 44 "./include/uapi/linux/shm.h" 2
# 93 "./include/uapi/linux/shm.h"
struct shminfo {
int shmmax;
int shmmin;
int shmmni;
int shmseg;
int shmall;
};
struct shm_info {
int used_ids;
__kernel_ulong_t shm_tot;
__kernel_ulong_t shm_rss;
__kernel_ulong_t shm_swp;
__kernel_ulong_t swap_attempts;
__kernel_ulong_t swap_successes;
};
# 8 "./include/linux/shm.h" 2
# 1 "./arch/x86/include/asm/shmparam.h" 1
# 9 "./include/linux/shm.h" 2
struct file;
struct sysv_shm {
struct list_head shm_clist;
};
long do_shmat(int shmid, char *shmaddr, int shmflg, unsigned long *addr,
unsigned long shmlba);
bool is_file_shm_hugepages(struct file *file);
void exit_shm(struct task_struct *task);
# 17 "./include/linux/sched.h" 2
# 1 "./include/linux/kcov.h" 1
# 1 "./include/uapi/linux/kcov.h" 1
# 11 "./include/uapi/linux/kcov.h"
enum {
# 20 "./include/uapi/linux/kcov.h"
KCOV_TRACE_PC = 0,
KCOV_TRACE_CMP = 1,
};
# 6 "./include/linux/kcov.h" 2
struct task_struct;
# 42 "./include/linux/kcov.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kcov_task_init(struct task_struct *t) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kcov_task_exit(struct task_struct *t) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kcov_prepare_switch(struct task_struct *t) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kcov_finish_switch(struct task_struct *t) {}
# 18 "./include/linux/sched.h" 2
# 1 "./include/linux/plist.h" 1
# 81 "./include/linux/plist.h"
struct plist_head {
struct list_head node_list;
};
struct plist_node {
int prio;
struct list_head prio_list;
struct list_head node_list;
};
# 123 "./include/linux/plist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
plist_head_init(struct plist_head *head)
{
INIT_LIST_HEAD(&head->node_list);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void plist_node_init(struct plist_node *node, int prio)
{
node->prio = prio;
INIT_LIST_HEAD(&node->prio_list);
INIT_LIST_HEAD(&node->node_list);
}
extern void plist_add(struct plist_node *node, struct plist_head *head);
extern void plist_del(struct plist_node *node, struct plist_head *head);
extern void plist_requeue(struct plist_node *node, struct plist_head *head);
# 212 "./include/linux/plist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int plist_head_empty(const struct plist_head *head)
{
return list_empty(&head->node_list);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int plist_node_empty(const struct plist_node *node)
{
return list_empty(&node->node_list);
}
# 282 "./include/linux/plist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct plist_node *plist_first(const struct plist_head *head)
{
return ({ void *__mptr = (void *)(head->node_list.next); do { extern void __compiletime_assert_285(void) ; if (!(!(!__builtin_types_compatible_p(typeof(*(head->node_list.next)), typeof(((struct plist_node *)0)->node_list)) && !__builtin_types_compatible_p(typeof(*(head->node_list.next)), typeof(void))))) __compiletime_assert_285(); } while (0); ((struct plist_node *)(__mptr - __builtin_offsetof(struct plist_node, node_list))); });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct plist_node *plist_last(const struct plist_head *head)
{
return ({ void *__mptr = (void *)(head->node_list.prev); do { extern void __compiletime_assert_297(void) ; if (!(!(!__builtin_types_compatible_p(typeof(*(head->node_list.prev)), typeof(((struct plist_node *)0)->node_list)) && !__builtin_types_compatible_p(typeof(*(head->node_list.prev)), typeof(void))))) __compiletime_assert_297(); } while (0); ((struct plist_node *)(__mptr - __builtin_offsetof(struct plist_node, node_list))); });
}
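/*
 * Sketch: the priority list above keeps nodes sorted by ascending ->prio, so
 * plist_first() is the entry with the numerically smallest priority value.
 * example_head, example_node and example_plist_top are hypothetical; locking
 * is the caller's responsibility and is omitted.
 */
static struct plist_head example_head;
static struct plist_node example_node;

static struct plist_node *example_plist_top(void)
{
        plist_head_init(&example_head);
        plist_node_init(&example_node, 10);     /* priority value 10 */
        plist_add(&example_node, &example_head);

        return plist_head_empty(&example_head) ? ((void *)0) : plist_first(&example_head);
}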
# 20 "./include/linux/sched.h" 2
# 1 "./include/linux/hrtimer.h" 1
# 15 "./include/linux/hrtimer.h"
# 1 "./include/linux/rbtree.h" 1
# 36 "./include/linux/rbtree.h"
struct rb_node {
unsigned long __rb_parent_color;
struct rb_node *rb_right;
struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
struct rb_root {
struct rb_node *rb_node;
};
# 57 "./include/linux/rbtree.h"
struct rb_root_cached {
struct rb_root rb_root;
struct rb_node *rb_leftmost;
};
# 77 "./include/linux/rbtree.h"
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
extern void rb_insert_color_cached(struct rb_node *,
struct rb_root_cached *, bool);
extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
struct rb_root_cached *root);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = ((void *)0);
*rb_link = node;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = ((void *)0);
({ uintptr_t _r_a_p__v = (uintptr_t)(node); if (__builtin_constant_p(node) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof((*rb_link)) __val; char __c[1]; } __u = { .__val = ( typeof((*rb_link))) ((typeof(*rb_link))(_r_a_p__v)) }; __write_once_size(&((*rb_link)), __u.__c, sizeof((*rb_link))); __u.__val; }); else do { do { extern void __compiletime_assert_120(void) ; if (!((sizeof(*&*rb_link) == sizeof(char) || sizeof(*&*rb_link) == sizeof(short) || sizeof(*&*rb_link) == sizeof(int) || sizeof(*&*rb_link) == sizeof(long)))) __compiletime_assert_120(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&*rb_link) __val; char __c[1]; } __u = { .__val = ( typeof(*&*rb_link)) ((typeof(*((typeof(*rb_link))_r_a_p__v)) *)((typeof(*rb_link))_r_a_p__v)) }; __write_once_size(&(*&*rb_link), __u.__c, sizeof(*&*rb_link)); __u.__val; }); } while (0); _r_a_p__v; });
}
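/*
 * Sketch: the canonical insertion walk for the rbtree API above, descending
 * to a leaf, then splicing and rebalancing. struct example_rb_entry and
 * example_rb_insert are hypothetical; container_of() is written out with
 * __builtin_offsetof() to match the rest of this dump.
 */
struct example_rb_entry {
        struct rb_node node;
        u64 key;
};

static void example_rb_insert(struct rb_root *root, struct example_rb_entry *new)
{
        struct rb_node **link = &root->rb_node, *parent = ((void *)0);

        while (*link) {
                struct example_rb_entry *cur = (struct example_rb_entry *)
                        ((char *)*link - __builtin_offsetof(struct example_rb_entry, node));

                parent = *link;
                link = new->key < cur->key ? &parent->rb_left : &parent->rb_right;
        }
        rb_link_node(&new->node, parent, link);         /* attach as a leaf */
        rb_insert_color(&new->node, root);              /* recolour/rotate to restore balance */
}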
# 16 "./include/linux/hrtimer.h" 2
# 1 "./include/linux/percpu.h" 1
# 1 "./include/linux/mmdebug.h" 1
struct page;
struct vm_area_struct;
struct mm_struct;
extern void dump_page(struct page *page, const char *reason);
extern void __dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
# 6 "./include/linux/percpu.h" 2
# 1 "./include/linux/smp.h" 1
# 15 "./include/linux/smp.h"
# 1 "./include/linux/llist.h" 1
# 66 "./include/linux/llist.h"
struct llist_head {
struct llist_node *first;
};
struct llist_node {
struct llist_node *next;
};
# 81 "./include/linux/llist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_llist_head(struct llist_head *list)
{
list->first = ((void *)0);
}
# 199 "./include/linux/llist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool llist_empty(const struct llist_head *head)
{
return ({ union { typeof(head->first) __val; char __c[1]; } __u; if (1) __read_once_size(&(head->first), __u.__c, sizeof(head->first)); else __read_once_size_nocheck(&(head->first), __u.__c, sizeof(head->first)); do { } while (0); __u.__val; }) == ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct llist_node *llist_next(struct llist_node *node)
{
return node->next;
}
extern bool llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool llist_add(struct llist_node *new, struct llist_head *head)
{
return llist_add_batch(new, new, head);
}
# 232 "./include/linux/llist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct llist_node *llist_del_all(struct llist_head *head)
{
return ({ typeof(&head->first) __ai_ptr = (&head->first); kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = (((((void *)0)))); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); });
}
extern struct llist_node *llist_del_first(struct llist_head *head);
struct llist_node *llist_reverse_order(struct llist_node *head);
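/*
 * Editor's note (illustrative sketch, not part of the preprocessed output):
 * the llist_del_all() expansion above is just xchg(&head->first, NULL), so a
 * consumer can atomically detach the whole lock-less list and then walk the
 * detached chain with llist_next() without further synchronization against
 * concurrent llist_add() producers. The function name is hypothetical.
 */
static inline __attribute__((__unused__)) int example_llist_drain_count(struct llist_head *head)
{
	struct llist_node *node = llist_del_all(head);
	int n = 0;

	while (node) {
		n++;
		node = llist_next(node);
	}
	return n;
}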
# 16 "./include/linux/smp.h" 2
typedef void (*smp_call_func_t)(void *info);
struct __call_single_data {
struct llist_node llist;
smp_call_func_t func;
void *info;
unsigned int flags;
};
typedef struct __call_single_data call_single_data_t
__attribute__((__aligned__(sizeof(struct __call_single_data))));
extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
int on_each_cpu(smp_call_func_t func, void *info, int wait);
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
void *info, bool wait);
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags);
void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags, const struct cpumask *mask);
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
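/*
 * Editor's note (illustrative sketch, not part of the preprocessed output):
 * smp_call_function_single() runs func(info) on the chosen CPU (via IPI when
 * it is not the calling CPU) and, with wait != 0, returns only after func has
 * completed, which is what makes passing on-stack data safe here. The
 * function names below are hypothetical.
 */
static __attribute__((__unused__)) void example_remote_fn(void *info)
{
	*(int *)info = 1;	/* runs on the target CPU */
}

static inline __attribute__((__unused__)) int example_run_on_cpu0(void)
{
	int done = 0;

	return smp_call_function_single(0, example_remote_fn, &done, 1);
}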
# 1 "./arch/x86/include/asm/smp.h" 1
# 12 "./arch/x86/include/asm/smp.h"
# 1 "./arch/x86/include/asm/mpspec.h" 1
# 1 "./arch/x86/include/asm/mpspec_def.h" 1
# 22 "./arch/x86/include/asm/mpspec_def.h"
struct mpf_intel {
char signature[4];
unsigned int physptr;
unsigned char length;
unsigned char specification;
unsigned char checksum;
unsigned char feature1;
unsigned char feature2;
unsigned char feature3;
unsigned char feature4;
unsigned char feature5;
};
struct mpc_table {
char signature[4];
unsigned short length;
char spec;
char checksum;
char oem[8];
char productid[12];
unsigned int oemptr;
unsigned short oemsize;
unsigned short oemcount;
unsigned int lapic;
unsigned int reserved;
};
# 68 "./arch/x86/include/asm/mpspec_def.h"
struct mpc_cpu {
unsigned char type;
unsigned char apicid;
unsigned char apicver;
unsigned char cpuflag;
unsigned int cpufeature;
unsigned int featureflag;
unsigned int reserved[2];
};
struct mpc_bus {
unsigned char type;
unsigned char busid;
unsigned char bustype[6];
};
# 106 "./arch/x86/include/asm/mpspec_def.h"
struct mpc_ioapic {
unsigned char type;
unsigned char apicid;
unsigned char apicver;
unsigned char flags;
unsigned int apicaddr;
};
struct mpc_intsrc {
unsigned char type;
unsigned char irqtype;
unsigned short irqflag;
unsigned char srcbus;
unsigned char srcbusirq;
unsigned char dstapic;
unsigned char dstirq;
};
enum mp_irq_source_types {
mp_INT = 0,
mp_NMI = 1,
mp_SMI = 2,
mp_ExtINT = 3
};
# 145 "./arch/x86/include/asm/mpspec_def.h"
struct mpc_lintsrc {
unsigned char type;
unsigned char irqtype;
unsigned short irqflag;
unsigned char srcbusid;
unsigned char srcbusirq;
unsigned char destapic;
unsigned char destapiclint;
};
struct mpc_oemtable {
char signature[4];
unsigned short length;
char rev;
char checksum;
char mpc[8];
};
# 177 "./arch/x86/include/asm/mpspec_def.h"
enum mp_bustype {
MP_BUS_ISA = 1,
MP_BUS_EISA,
MP_BUS_PCI,
};
# 7 "./arch/x86/include/asm/mpspec.h" 2
# 1 "./arch/x86/include/asm/x86_init.h" 1
struct mpc_bus;
struct mpc_cpu;
struct mpc_table;
struct cpuinfo_x86;
# 23 "./arch/x86/include/asm/x86_init.h"
struct x86_init_mpparse {
void (*mpc_record)(unsigned int mode);
void (*setup_ioapic_ids)(void);
int (*mpc_apic_id)(struct mpc_cpu *m);
void (*smp_read_mpc_oem)(struct mpc_table *mpc);
void (*mpc_oem_pci_bus)(struct mpc_bus *m);
void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
void (*find_smp_config)(void);
void (*get_smp_config)(unsigned int early);
};
# 42 "./arch/x86/include/asm/x86_init.h"
struct x86_init_resources {
void (*probe_roms)(void);
void (*reserve_resources)(void);
char *(*memory_setup)(void);
};
# 56 "./arch/x86/include/asm/x86_init.h"
struct x86_init_irqs {
void (*pre_vector_init)(void);
void (*intr_init)(void);
void (*trap_init)(void);
void (*intr_mode_init)(void);
};
struct x86_init_oem {
void (*arch_setup)(void);
void (*banner)(void);
};
# 80 "./arch/x86/include/asm/x86_init.h"
struct x86_init_paging {
void (*pagetable_init)(void);
};
# 91 "./arch/x86/include/asm/x86_init.h"
struct x86_init_timers {
void (*setup_percpu_clockev)(void);
void (*timer_init)(void);
void (*wallclock_init)(void);
};
struct x86_init_iommu {
int (*iommu_init)(void);
};
# 112 "./arch/x86/include/asm/x86_init.h"
struct x86_init_pci {
int (*arch_init)(void);
int (*init)(void);
void (*init_irq)(void);
void (*fixup_irqs)(void);
};
# 127 "./arch/x86/include/asm/x86_init.h"
struct x86_hyper_init {
void (*init_platform)(void);
void (*guest_late_init)(void);
bool (*x2apic_available)(void);
void (*init_mem_mapping)(void);
void (*init_after_bootmem)(void);
};
struct x86_init_acpi {
u64 (*get_root_pointer)(void);
void (*reduced_hw_early_init)(void);
};
struct x86_init_ops {
struct x86_init_resources resources;
struct x86_init_mpparse mpparse;
struct x86_init_irqs irqs;
struct x86_init_oem oem;
struct x86_init_paging paging;
struct x86_init_timers timers;
struct x86_init_iommu iommu;
struct x86_init_pci pci;
struct x86_hyper_init hyper;
struct x86_init_acpi acpi;
};
struct x86_cpuinit_ops {
void (*setup_percpu_clockev)(void);
void (*early_percpu_clock_init)(void);
void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
};
struct timespec64;
# 194 "./arch/x86/include/asm/x86_init.h"
struct x86_legacy_devices {
int pnpbios;
};
# 207 "./arch/x86/include/asm/x86_init.h"
enum x86_legacy_i8042_state {
X86_LEGACY_I8042_PLATFORM_ABSENT,
X86_LEGACY_I8042_FIRMWARE_ABSENT,
X86_LEGACY_I8042_EXPECTED_PRESENT,
};
# 225 "./arch/x86/include/asm/x86_init.h"
struct x86_legacy_features {
enum x86_legacy_i8042_state i8042;
int rtc;
int warm_reset;
int no_vga;
int reserve_bios_regions;
struct x86_legacy_devices devices;
};
struct x86_hyper_runtime {
void (*pin_vcpu)(int cpu);
};
# 264 "./arch/x86/include/asm/x86_init.h"
struct x86_platform_ops {
unsigned long (*calibrate_cpu)(void);
unsigned long (*calibrate_tsc)(void);
void (*get_wallclock)(struct timespec64 *ts);
int (*set_wallclock)(const struct timespec64 *ts);
void (*iommu_shutdown)(void);
bool (*is_untracked_pat_range)(u64 start, u64 end);
void (*nmi_init)(void);
unsigned char (*get_nmi_reason)(void);
void (*save_sched_clock_state)(void);
void (*restore_sched_clock_state)(void);
void (*apic_post_init)(void);
struct x86_legacy_features legacy;
void (*set_legacy_features)(void);
struct x86_hyper_runtime hyper;
};
struct pci_dev;
struct x86_msi_ops {
int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
void (*teardown_msi_irq)(unsigned int irq);
void (*teardown_msi_irqs)(struct pci_dev *dev);
void (*restore_msi_irqs)(struct pci_dev *dev);
};
struct x86_apic_ops {
unsigned int (*io_apic_read) (unsigned int apic, unsigned int reg);
void (*restore)(void);
};
extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
extern struct x86_msi_ops x86_msi;
extern struct x86_apic_ops x86_apic_ops;
extern void x86_early_init_platform_quirks(void);
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
extern bool x86_pnpbios_disabled(void);
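/*
 * Editor's note (illustrative sketch, not part of the preprocessed output):
 * x86_init, x86_cpuinit and x86_platform are tables of function pointers
 * consulted during boot; platform or hypervisor code typically overrides
 * individual hooks early, with x86_init_noop()/x86_init_uint_noop() above
 * serving as do-nothing defaults. The hook assignment and function names
 * below are hypothetical.
 */
static __attribute__((__unused__)) void example_oem_banner(void)
{
}

static inline __attribute__((__unused__)) void example_install_oem_banner(void)
{
	x86_init.oem.banner = example_oem_banner;
}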
# 8 "./arch/x86/include/asm/mpspec.h" 2
# 1 "./arch/x86/include/asm/apicdef.h" 1
# 179 "./arch/x86/include/asm/apicdef.h"
struct local_apic {
struct { unsigned int __reserved[4]; } __reserved_01;
struct { unsigned int __reserved[4]; } __reserved_02;
struct {
unsigned int __reserved_1 : 24,
phys_apic_id : 4,
__reserved_2 : 4;
unsigned int __reserved[3];
} id;
const
struct {
unsigned int version : 8,
__reserved_1 : 8,
max_lvt : 8,
__reserved_2 : 8;
unsigned int __reserved[3];
} version;
struct { unsigned int __reserved[4]; } __reserved_03;
struct { unsigned int __reserved[4]; } __reserved_04;
struct { unsigned int __reserved[4]; } __reserved_05;
struct { unsigned int __reserved[4]; } __reserved_06;
struct {
unsigned int priority : 8,
__reserved_1 : 24;
unsigned int __reserved_2[3];
} tpr;
const
struct {
unsigned int priority : 8,
__reserved_1 : 24;
unsigned int __reserved_2[3];
} apr;
const
struct {
unsigned int priority : 8,
__reserved_1 : 24;
unsigned int __reserved_2[3];
} ppr;
struct {
unsigned int eoi;
unsigned int __reserved[3];
} eoi;
struct { unsigned int __reserved[4]; } __reserved_07;
struct {
unsigned int __reserved_1 : 24,
logical_dest : 8;
unsigned int __reserved_2[3];
} ldr;
struct {
unsigned int __reserved_1 : 28,
model : 4;
unsigned int __reserved_2[3];
} dfr;
struct {
unsigned int spurious_vector : 8,
apic_enabled : 1,
focus_cpu : 1,
__reserved_2 : 22;
unsigned int __reserved_3[3];
} svr;
struct {
unsigned int bitfield;
unsigned int __reserved[3];
} isr [8];
struct {
unsigned int bitfield;
unsigned int __reserved[3];
} tmr [8];
struct {
unsigned int bitfield;
unsigned int __reserved[3];
} irr [8];
union {
struct {
unsigned int send_cs_error : 1,
receive_cs_error : 1,
send_accept_error : 1,
receive_accept_error : 1,
__reserved_1 : 1,
send_illegal_vector : 1,
receive_illegal_vector : 1,
illegal_register_address : 1,
__reserved_2 : 24;
unsigned int __reserved_3[3];
} error_bits;
struct {
unsigned int errors;
unsigned int __reserved_3[3];
} all_errors;
} esr;
struct { unsigned int __reserved[4]; } __reserved_08;
struct { unsigned int __reserved[4]; } __reserved_09;
struct { unsigned int __reserved[4]; } __reserved_10;
struct { unsigned int __reserved[4]; } __reserved_11;
struct { unsigned int __reserved[4]; } __reserved_12;
struct { unsigned int __reserved[4]; } __reserved_13;
struct { unsigned int __reserved[4]; } __reserved_14;
struct {
unsigned int vector : 8,
delivery_mode : 3,
destination_mode : 1,
delivery_status : 1,
__reserved_1 : 1,
level : 1,
trigger : 1,
__reserved_2 : 2,
shorthand : 2,
__reserved_3 : 12;
unsigned int __reserved_4[3];
} icr1;
struct {
union {
unsigned int __reserved_1 : 24,
phys_dest : 4,
__reserved_2 : 4;
unsigned int __reserved_3 : 24,
logical_dest : 8;
} dest;
unsigned int __reserved_4[3];
} icr2;
struct {
unsigned int vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
timer_mode : 1,
__reserved_3 : 14;
unsigned int __reserved_4[3];
} lvt_timer;
struct {
unsigned int vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
unsigned int __reserved_4[3];
} lvt_thermal;
struct {
unsigned int vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
unsigned int __reserved_4[3];
} lvt_pc;
struct {
unsigned int vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
unsigned int __reserved_3[3];
} lvt_lint0;
struct {
unsigned int vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
unsigned int __reserved_3[3];
} lvt_lint1;
struct {
unsigned int vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
unsigned int __reserved_4[3];
} lvt_error;
struct {
unsigned int initial_count;
unsigned int __reserved_2[3];
} timer_icr;
const
struct {
unsigned int curr_count;
unsigned int __reserved_2[3];
} timer_ccr;
struct { unsigned int __reserved[4]; } __reserved_16;
struct { unsigned int __reserved[4]; } __reserved_17;
struct { unsigned int __reserved[4]; } __reserved_18;
struct { unsigned int __reserved[4]; } __reserved_19;
struct {
unsigned int divisor : 4,
__reserved_1 : 28;
unsigned int __reserved_2[3];
} timer_dcr;
struct { unsigned int __reserved[4]; } __reserved_20;
} __attribute__ ((packed));
# 435 "./arch/x86/include/asm/apicdef.h"
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};
# 9 "./arch/x86/include/asm/mpspec.h" 2
extern int pic_mode;
# 40 "./arch/x86/include/asm/mpspec.h"
extern unsigned long mp_bus_not_pci[(((256) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];
extern unsigned int boot_cpu_physical_apicid;
extern u8 boot_cpu_apic_version;
extern unsigned long mp_lapic_addr;
extern int smp_found_config;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void get_smp_config(void)
{
x86_init.mpparse.get_smp_config(0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void early_get_smp_config(void)
{
x86_init.mpparse.get_smp_config(1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void find_smp_config(void)
{
x86_init.mpparse.find_smp_config();
}
extern void e820__memblock_alloc_reserved_mpc_new(void);
extern int enable_update_mptable;
extern int default_mpc_apic_id(struct mpc_cpu *m);
extern void default_smp_read_mpc_oem(struct mpc_table *mpc);
extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str);
extern void default_find_smp_config(void);
extern void default_get_smp_config(unsigned int early);
# 89 "./arch/x86/include/asm/mpspec.h"
int generic_processor_info(int apicid, int version);
struct physid_mask {
unsigned long mask[(((32768) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];
};
typedef struct physid_mask physid_mask_t;
# 132 "./arch/x86/include/asm/mpspec.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long physids_coerce(physid_mask_t *map)
{
return map->mask[0];
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void physids_promote(unsigned long physids, physid_mask_t *map)
{
bitmap_zero((*map).mask, 32768);
map->mask[0] = physids;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void physid_set_mask_of_physid(int physid, physid_mask_t *map)
{
bitmap_zero((*map).mask, 32768);
set_bit(physid, (*map).mask);
}
extern physid_mask_t phys_cpu_present_map;
# 13 "./arch/x86/include/asm/smp.h" 2
# 1 "./arch/x86/include/asm/apic.h" 1
# 10 "./arch/x86/include/asm/apic.h"
# 1 "./arch/x86/include/asm/fixmap.h" 1
# 29 "./arch/x86/include/asm/fixmap.h"
# 1 "./arch/x86/include/asm/acpi.h" 1
# 26 "./arch/x86/include/asm/acpi.h"
# 1 "./include/acpi/pdc_intel.h" 1
# 27 "./arch/x86/include/asm/acpi.h" 2
# 1 "./arch/x86/include/asm/numa.h" 1
# 1 "./include/linux/nodemask.h" 1
# 96 "./include/linux/nodemask.h"
# 1 "./include/linux/numa.h" 1
# 97 "./include/linux/nodemask.h" 2
typedef struct { unsigned long bits[((((1 << 6)) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
# 109 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
return m ? (1 << 6) : 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
return m ? m->bits : ((void *)0);
}
# 128 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __node_set(int node, volatile nodemask_t *dstp)
{
set_bit(node, dstp->bits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_shift_right(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_shift_left(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __first_node(const nodemask_t *srcp)
{
return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 6))) *)1 == (typeof((int)(find_first_bit(srcp->bits, (1 << 6)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 6))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_first_bit(srcp->bits, (1 << 6)))) * 0l)) : (int *)8))))), (((int)((1 << 6))) < ((int)(find_first_bit(srcp->bits, (1 << 6)))) ? ((int)((1 << 6))) : ((int)(find_first_bit(srcp->bits, (1 << 6))))), ({ typeof((int)((1 << 6))) __UNIQUE_ID___x7 = ((int)((1 << 6))); typeof((int)(find_first_bit(srcp->bits, (1 << 6)))) __UNIQUE_ID___y8 = ((int)(find_first_bit(srcp->bits, (1 << 6)))); ((__UNIQUE_ID___x7) < (__UNIQUE_ID___y8) ? (__UNIQUE_ID___x7) : (__UNIQUE_ID___y8)); }));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __next_node(int n, const nodemask_t *srcp)
{
return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 6))) *)1 == (typeof((int)(find_next_bit(srcp->bits, (1 << 6), n+1))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 6))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_next_bit(srcp->bits, (1 << 6), n+1))) * 0l)) : (int *)8))))), (((int)((1 << 6))) < ((int)(find_next_bit(srcp->bits, (1 << 6), n+1))) ? ((int)((1 << 6))) : ((int)(find_next_bit(srcp->bits, (1 << 6), n+1)))), ({ typeof((int)((1 << 6))) __UNIQUE_ID___x9 = ((int)((1 << 6))); typeof((int)(find_next_bit(srcp->bits, (1 << 6), n+1))) __UNIQUE_ID___y10 = ((int)(find_next_bit(srcp->bits, (1 << 6), n+1))); ((__UNIQUE_ID___x9) < (__UNIQUE_ID___y10) ? (__UNIQUE_ID___x9) : (__UNIQUE_ID___y10)); }));
}
int __next_node_in(int node, const nodemask_t *srcp);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_nodemask_of_node(nodemask_t *mask, int node)
{
__nodes_clear(&(*mask), (1 << 6));
__node_set((node), &(*mask));
}
# 299 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __first_unset_node(const nodemask_t *maskp)
{
return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 6))) *)1 == (typeof((int)(find_first_zero_bit(maskp->bits, (1 << 6)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 6))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_first_zero_bit(maskp->bits, (1 << 6)))) * 0l)) : (int *)8))))), (((int)((1 << 6))) < ((int)(find_first_zero_bit(maskp->bits, (1 << 6)))) ? ((int)((1 << 6))) : ((int)(find_first_zero_bit(maskp->bits, (1 << 6))))), ({ typeof((int)((1 << 6))) __UNIQUE_ID___x11 = ((int)((1 << 6))); typeof((int)(find_first_zero_bit(maskp->bits, (1 << 6)))) __UNIQUE_ID___y12 = ((int)(find_first_zero_bit(maskp->bits, (1 << 6)))); ((__UNIQUE_ID___x11) < (__UNIQUE_ID___y12) ? (__UNIQUE_ID___x11) : (__UNIQUE_ID___y12)); }));
}
# 333 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodemask_parse_user(const char *buf, int len,
nodemask_t *dstp, int nbits)
{
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __node_remap(int oldbit,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
const nodemask_t *relmapp, int nbits)
{
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
int sz, int nbits)
{
bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
# 391 "./include/linux/nodemask.h"
enum node_states {
N_POSSIBLE,
N_ONLINE,
N_NORMAL_MEMORY,
N_HIGH_MEMORY = N_NORMAL_MEMORY,
N_MEMORY,
N_CPU,
NR_NODE_STATES
};
extern nodemask_t node_states[NR_NODE_STATES];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int node_state(int node, enum node_states state)
{
return (__builtin_constant_p(((node))) ? constant_test_bit(((node)), ((node_states[state]).bits)) : variable_test_bit(((node)), ((node_states[state]).bits)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_set_state(int node, enum node_states state)
{
__node_set(node, &node_states[state]);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_clear_state(int node, enum node_states state)
{
__node_clear(node, &node_states[state]);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int num_node_state(enum node_states state)
{
return __nodes_weight(&(node_states[state]), (1 << 6));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int next_online_node(int nid)
{
return __next_node((nid), &(node_states[N_ONLINE]));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int next_memory_node(int nid)
{
return __next_node((nid), &(node_states[N_MEMORY]));
}
extern int nr_node_ids;
extern int nr_online_nodes;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_set_online(int nid)
{
node_set_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_set_offline(int nid)
{
node_clear_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
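/*
 * Editor's note (illustrative sketch, not part of the preprocessed output):
 * node_states[] is a small array of nodemask_t bitmaps and node_state() just
 * tests a node's bit in the selected bitmap, so callers usually iterate node
 * IDs and filter on a state such as N_ONLINE or N_MEMORY (num_node_state()
 * above does the same via a bitmap weight). The function name is
 * hypothetical.
 */
static inline __attribute__((__unused__)) int example_count_memory_nodes(void)
{
	int nid, n = 0;

	for (nid = 0; nid < (1 << 6); nid++)	/* (1 << 6) is MAX_NUMNODES in this config */
		if (node_state(nid, N_MEMORY))
			n++;
	return n;
}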
# 497 "./include/linux/nodemask.h"
extern int node_random(const nodemask_t *maskp);
# 531 "./include/linux/nodemask.h"
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
};
# 6 "./arch/x86/include/asm/numa.h" 2
# 1 "./arch/x86/include/asm/topology.h" 1
# 42 "./arch/x86/include/asm/topology.h"
extern __attribute__((section(".data..percpu" ""))) __typeof__(int) x86_cpu_to_node_map; extern __typeof__(int) *x86_cpu_to_node_map_early_ptr; extern __typeof__(int) x86_cpu_to_node_map_early_map[];
# 56 "./arch/x86/include/asm/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int early_cpu_to_node(int cpu)
{
return *((x86_cpu_to_node_map_early_ptr) ? &(x86_cpu_to_node_map_early_ptr)[cpu] : &(*({ do { const void *__vpp_verify = (typeof((&(x86_cpu_to_node_map)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(x86_cpu_to_node_map)))) *)((&(x86_cpu_to_node_map)))); (typeof((typeof(*((&(x86_cpu_to_node_map)))) *)((&(x86_cpu_to_node_map))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })));
}
extern cpumask_var_t node_to_cpumask_map[(1 << 6)];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cpumask *cpumask_of_node(int node)
{
return node_to_cpumask_map[node];
}
extern void setup_node_to_cpumask_map(void);
extern int __node_distance(int, int);
# 103 "./arch/x86/include/asm/topology.h"
# 1 "./include/asm-generic/topology.h" 1
# 104 "./arch/x86/include/asm/topology.h" 2
extern const struct cpumask *cpu_coregroup_mask(int cpu);
# 115 "./arch/x86/include/asm/topology.h"
extern unsigned int __max_logical_packages;
extern int __max_smt_threads;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int topology_max_smt_threads(void)
{
return __max_smt_threads;
}
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
int topology_phys_to_logical_pkg(unsigned int pkg);
bool topology_is_primary_thread(unsigned int cpu);
bool topology_smt_supported(void);
# 139 "./arch/x86/include/asm/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_fix_phys_package_id(int num, u32 slot)
{
}
struct pci_bus;
int x86_pci_root_bus_node(int bus);
void x86_pci_root_bus_resources(int bus, struct list_head *resources);
extern bool x86_topology_update;
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(int) sched_core_priority;
extern unsigned int __attribute__((__section__(".data..read_mostly"))) sysctl_sched_itmt_enabled;
void sched_set_itmt_core_prio(int prio, int core_cpu);
int sched_set_itmt_support(void);
void sched_clear_itmt_support(void);
# 8 "./arch/x86/include/asm/numa.h" 2
# 21 "./arch/x86/include/asm/numa.h"
extern int numa_off;
# 31 "./arch/x86/include/asm/numa.h"
extern s16 __apicid_to_node[32768];
extern nodemask_t numa_nodes_parsed __attribute__((__section__(".init.data")));
extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) numa_add_memblk(int nodeid, u64 start, u64 end);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) numa_set_distance(int from, int to, int distance);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_apicid_to_node(int apicid, s16 node)
{
__apicid_to_node[apicid] = node;
}
extern int numa_cpu_node(int cpu);
# 60 "./arch/x86/include/asm/numa.h"
extern void numa_set_node(int cpu, int node);
extern void numa_clear_node(int cpu);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) init_cpu_to_node(void);
extern void numa_add_cpu(int cpu);
extern void numa_remove_cpu(int cpu);
# 29 "./arch/x86/include/asm/acpi.h" 2
# 1 "./arch/x86/include/asm/fixmap.h" 1
# 30 "./arch/x86/include/asm/acpi.h" 2
# 1 "./arch/x86/include/asm/mmu.h" 1
# 1 "./include/linux/rwsem.h" 1
# 23 "./include/linux/rwsem.h"
struct rw_semaphore;
struct rw_semaphore {
atomic_long_t count;
struct list_head wait_list;
raw_spinlock_t wait_lock;
struct optimistic_spin_queue osq;
struct task_struct *owner;
};
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
# 1 "./arch/x86/include/asm/rwsem.h" 1
# 81 "./arch/x86/include/asm/rwsem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __down_read(struct rw_semaphore *sem)
{
({ struct rw_semaphore* ret; asm volatile("# beginning down_read\n\t" ".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "incq" " " "(%[sem])\n\t" " jns 1f\n" " call " "call_rwsem_down_read_failed" "\n" "1:\n\t" "# ending down_read\n\t" : "+m" (sem->count), "=a" (ret), "+r" (current_stack_pointer) : [sem] "a" (sem) : "memory", "cc"); ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __down_read_killable(struct rw_semaphore *sem)
{
if (IS_ERR(({ struct rw_semaphore* ret; asm volatile("# beginning down_read\n\t" ".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "incq" " " "(%[sem])\n\t" " jns 1f\n" " call " "call_rwsem_down_read_failed_killable" "\n" "1:\n\t" "# ending down_read\n\t" : "+m" (sem->count), "=a" (ret), "+r" (current_stack_pointer) : [sem] "a" (sem) : "memory", "cc"); ret; })))
return -4;
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __down_read_trylock(struct rw_semaphore *sem)
{
long result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
" mov %[count],%[result]\n\t"
"1:\n\t"
" mov %[result],%[tmp]\n\t"
" add %[inc],%[tmp]\n\t"
" jle 2f\n\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " cmpxchg %[tmp],%[count]\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
: [count] "+m" (sem->count), [result] "=&a" (result),
[tmp] "=&r" (tmp)
: [inc] "i" (0x00000001L)
: "memory", "cc");
return result >= 0;
}
# 140 "./arch/x86/include/asm/rwsem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __down_write(struct rw_semaphore *sem)
{
({ long tmp; struct rw_semaphore* ret; asm volatile("# beginning down_write\n\t" ".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " xadd %[tmp],(%[sem])\n\t" " test " " " "%k1" " " "," " " "%k1" " " "\n\t" " jz 1f\n" " call " "call_rwsem_down_write_failed" "\n" "1:\n" "# ending down_write" : "+m" (sem->count), [tmp] "=d" (tmp), "=a" (ret), "+r" (current_stack_pointer) : [sem] "a" (sem), "[tmp]" (((-0xffffffffL -1) + 0x00000001L)) : "memory", "cc"); ret; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __down_write_killable(struct rw_semaphore *sem)
{
if (IS_ERR(({ long tmp; struct rw_semaphore* ret; asm volatile("# beginning down_write\n\t" ".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " xadd %[tmp],(%[sem])\n\t" " test " " " "%k1" " " "," " " "%k1" " " "\n\t" " jz 1f\n" " call " "call_rwsem_down_write_failed_killable" "\n" "1:\n" "# ending down_write" : "+m" (sem->count), [tmp] "=d" (tmp), "=a" (ret), "+r" (current_stack_pointer) : [sem] "a" (sem), "[tmp]" (((-0xffffffffL -1) + 0x00000001L)) : "memory", "cc"); ret; })))
return -4;
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __down_write_trylock(struct rw_semaphore *sem)
{
bool result;
long tmp0, tmp1;
asm volatile("# beginning __down_write_trylock\n\t"
" mov %[count],%[tmp0]\n\t"
"1:\n\t"
" test " " " "%k1" " " "," " " "%k1" " " "\n\t"
" jnz 2f\n\t"
" mov %[tmp0],%[tmp1]\n\t"
" add %[inc],%[tmp1]\n\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " cmpxchg %[tmp1],%[count]\n\t"
" jnz 1b\n\t"
"2:\n\t"
"\n\tset" "e" " %[_cc_" "e" "]\n"
"# ending __down_write_trylock\n\t"
: [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
[tmp1] "=&r" (tmp1), [_cc_e] "=qm" (result)
: [inc] "er" (((-0xffffffffL -1) + 0x00000001L))
: "memory");
return result;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __up_read(struct rw_semaphore *sem)
{
long tmp;
asm volatile("# beginning __up_read\n\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " xadd %[tmp],(%[sem])\n\t"
" jns 1f\n\t"
" call call_rwsem_wake\n"
"1:\n"
"# ending __up_read\n"
: "+m" (sem->count), [tmp] "=d" (tmp)
: [sem] "a" (sem), "[tmp]" (-0x00000001L)
: "memory", "cc");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __up_write(struct rw_semaphore *sem)
{
long tmp;
asm volatile("# beginning __up_write\n\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " xadd %[tmp],(%[sem])\n\t"
" jns 1f\n\t"
" call call_rwsem_wake\n"
"1:\n\t"
"# ending __up_write\n"
: "+m" (sem->count), [tmp] "=d" (tmp)
: [sem] "a" (sem), "[tmp]" (-((-0xffffffffL -1) + 0x00000001L))
: "memory", "cc");
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __downgrade_write(struct rw_semaphore *sem)
{
asm volatile("# beginning __downgrade_write\n\t"
".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "addq" " " "%[inc],(%[sem])\n\t"
" jns 1f\n\t"
" call call_rwsem_downgrade_wake\n"
"1:\n\t"
"# ending __downgrade_write\n"
: "+m" (sem->count)
: [sem] "a" (sem), [inc] "er" (-(-0xffffffffL -1))
: "memory", "cc");
}
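/*
 * Editor's note (not part of the preprocessed output): these are the x86
 * rwsem fast paths. __down_read() atomically increments sem->count and only
 * calls call_rwsem_down_read_failed if the result is negative;
 * __down_read_trylock() retries a cmpxchg with the 0x00000001L reader bias.
 * __down_write() xadds the write bias ((-0xffffffffL - 1) + 0x00000001L) and
 * takes the call_rwsem_down_write_failed path unless the low 32 bits of the
 * previous count (the active mask checked by the "test" instruction) were
 * zero. The __up_*() and __downgrade_write() paths undo or convert the bias
 * and jump to the call_rwsem_wake / call_rwsem_downgrade_wake stubs when the
 * sign of the result shows that waiters may need to be woken.
 */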
# 62 "./include/linux/rwsem.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rwsem_is_locked(struct rw_semaphore *sem)
{
return atomic_long_read(&sem->count) != 0;
}
# 96 "./include/linux/rwsem.h"
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key);
# 112 "./include/linux/rwsem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rwsem_is_contended(struct rw_semaphore *sem)
{
return !list_empty(&sem->wait_list);
}
extern void down_read(struct rw_semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_read_killable(struct rw_semaphore *sem);
extern int down_read_trylock(struct rw_semaphore *sem);
extern void down_write(struct rw_semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_write_killable(struct rw_semaphore *sem);
extern int down_write_trylock(struct rw_semaphore *sem);
extern void up_read(struct rw_semaphore *sem);
extern void up_write(struct rw_semaphore *sem);
extern void downgrade_write(struct rw_semaphore *sem);
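/*
 * Editor's note (illustrative sketch, not part of the preprocessed output):
 * typical reader-side use of the rw_semaphore API declared above; writers
 * would wrap updates in down_write()/up_write() instead. The structure and
 * function names are hypothetical.
 */
struct example_guarded {
	struct rw_semaphore sem;
	int value;
};

static inline __attribute__((__unused__)) int example_read_value(struct example_guarded *g)
{
	int v;

	down_read(&g->sem);
	v = g->value;
	up_read(&g->sem);
	return v;
}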
# 7 "./arch/x86/include/asm/mmu.h" 2
typedef struct {
u64 ctx_id;
# 28 "./arch/x86/include/asm/mmu.h"
atomic64_t tlb_gen;
struct rw_semaphore ldt_usr_sem;
struct ldt_struct *ldt;
unsigned short ia32_compat;
struct mutex lock;
void *vdso;
const struct vdso_image *vdso_image;
atomic_t perf_rdpmc_allowed;
u16 pkey_allocation_map;
s16 execute_only_pkey;
} mm_context_t;
void leave_mm(int cpu);
# 32 "./arch/x86/include/asm/acpi.h" 2
# 1 "./arch/x86/include/asm/realmode.h" 1
# 15 "./arch/x86/include/asm/realmode.h"
# 1 "./arch/x86/include/asm/io.h" 1
# 44 "./arch/x86/include/asm/io.h"
# 1 "./arch/x86/include/generated/asm/early_ioremap.h" 1
# 1 "./include/asm-generic/early_ioremap.h" 1
# 11 "./include/asm-generic/early_ioremap.h"
extern void *early_ioremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap_ro(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap_prot(resource_size_t phys_addr,
unsigned long size, unsigned long prot_val);
extern void early_iounmap(void *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
extern void early_ioremap_shutdown(void);
extern void early_ioremap_init(void);
extern void early_ioremap_setup(void);
extern void early_ioremap_reset(void);
extern void copy_from_early_mem(void *dest, phys_addr_t src,
unsigned long size);
# 2 "./arch/x86/include/generated/asm/early_ioremap.h" 2
# 45 "./arch/x86/include/asm/io.h" 2
# 57 "./arch/x86/include/asm/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) :"memory"); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) :"memory"); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) :"memory"); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char __readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) ); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short __readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) ); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) ); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) :"memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) :"memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) :"memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) ); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) ); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) ); }
# 97 "./arch/x86/include/asm/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 readq(const volatile void *addr) { u64 ret; asm volatile("mov" "q" " %1,%0":"=r" (ret) :"m" (*(volatile u64 *)addr) :"memory"); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 __readq(const volatile void *addr) { u64 ret; asm volatile("mov" "q" " %1,%0":"=r" (ret) :"m" (*(volatile u64 *)addr) ); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writeq(u64 val, volatile void *addr) { asm volatile("mov" "q" " %0,%1": :"r" (val), "m" (*(volatile u64 *)addr) :"memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __writeq(u64 val, volatile void *addr) { asm volatile("mov" "q" " %0,%1": :"r" (val), "m" (*(volatile u64 *)addr) ); }
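/*
 * Editor's note (illustrative sketch, not part of the preprocessed output):
 * the readX()/writeX() accessors above compile to single mov instructions
 * with a "memory" clobber, so the compiler cannot cache or reorder them
 * around other memory accesses, while the __readX()/__writeX() variants omit
 * the clobber (relaxed ordering). The register layout and function name
 * below are hypothetical.
 */
static inline __attribute__((__unused__)) unsigned int example_toggle_mmio_bit0(volatile void *reg)
{
	unsigned int v = readl(reg);

	writel(v ^ 1u, reg);
	return v;
}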
# 115 "./arch/x86/include/asm/io.h"
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
# 131 "./arch/x86/include/asm/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) phys_addr_t virt_to_phys(volatile void *address)
{
return __phys_addr_nodebug((unsigned long)(address));
}
# 150 "./arch/x86/include/asm/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *phys_to_virt(phys_addr_t address)
{
return ((void *)((unsigned long)(address)+((unsigned long)page_offset_base)));
}
# 166 "./arch/x86/include/asm/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int isa_virt_to_bus(volatile void *address)
{
return (unsigned int)virt_to_phys(address);
}
# 186 "./arch/x86/include/asm/io.h"
extern void *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void *ioremap_uc(resource_size_t offset, unsigned long size);
extern void *ioremap_cache(resource_size_t offset, unsigned long size);
extern void *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
extern void *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
# 211 "./arch/x86/include/asm/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void *addr);
extern void set_iounmap_nonlazy(void);
void memcpy_fromio(void *, const volatile void *, size_t);
void memcpy_toio(volatile void *, const void *, size_t);
void memset_io(volatile void *, int, size_t);
# 1 "./include/asm-generic/iomap.h" 1
# 29 "./include/asm-generic/iomap.h"
extern unsigned int ioread8(void *);
extern unsigned int ioread16(void *);
extern unsigned int ioread16be(void *);
extern unsigned int ioread32(void *);
extern unsigned int ioread32be(void *);
extern u64 ioread64(void *);
extern u64 ioread64be(void *);
extern void iowrite8(u8, void *);
extern void iowrite16(u16, void *);
extern void iowrite16be(u16, void *);
extern void iowrite32(u32, void *);
extern void iowrite32be(u32, void *);
extern void iowrite64(u64, void *);
extern void iowrite64be(u64, void *);
# 60 "./include/asm-generic/iomap.h"
extern void ioread8_rep(void *port, void *buf, unsigned long count);
extern void ioread16_rep(void *port, void *buf, unsigned long count);
extern void ioread32_rep(void *port, void *buf, unsigned long count);
extern void iowrite8_rep(void *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void *port, const void *buf, unsigned long count);
extern void iowrite32_rep(void *port, const void *buf, unsigned long count);
extern void *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void *);
# 84 "./include/asm-generic/iomap.h"
struct pci_dev;
extern void pci_iounmap(struct pci_dev *dev, void *);
# 1 "./include/asm-generic/pci_iomap.h" 1
# 10 "./include/asm-generic/pci_iomap.h"
struct pci_dev;
extern void *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
extern void *pci_iomap_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
extern void *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
# 93 "./include/asm-generic/iomap.h" 2
# 233 "./arch/x86/include/asm/io.h" 2
# 246 "./arch/x86/include/asm/io.h"
extern void native_io_delay(void);
extern int io_delay_type;
extern void io_delay_init(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void slow_down_io(void)
{
native_io_delay();
}
# 278 "./arch/x86/include/asm/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sev_key_active(void) { return false; }
# 342 "./arch/x86/include/asm/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outb(unsigned char value, int port) { asm volatile("out" "b" " %" "b" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char inb(int port) { unsigned char value; asm volatile("in" "b" " %w1, %" "b" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outb_p(unsigned char value, int port) { outb(value, port); slow_down_io(); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char inb_p(int port) { unsigned char value = inb(port); slow_down_io(); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsb(int port, const void *addr, unsigned long count) { if (sev_key_active()) { unsigned char *value = (unsigned char *)addr; while (count) { outb(*value, port); value++; count--; } } else { asm volatile("rep; outs" "b" : "+S"(addr), "+c"(count) : "d"(port) : "memory"); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insb(int port, void *addr, unsigned long count) { if (sev_key_active()) { unsigned char *value = (unsigned char *)addr; while (count) { *value = inb(port); value++; count--; } } else { asm volatile("rep; ins" "b" : "+D"(addr), "+c"(count) : "d"(port) : "memory"); } }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outw(unsigned short value, int port) { asm volatile("out" "w" " %" "w" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short inw(int port) { unsigned short value; asm volatile("in" "w" " %w1, %" "w" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outw_p(unsigned short value, int port) { outw(value, port); slow_down_io(); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short inw_p(int port) { unsigned short value = inw(port); slow_down_io(); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsw(int port, const void *addr, unsigned long count) { if (sev_key_active()) { unsigned short *value = (unsigned short *)addr; while (count) { outw(*value, port); value++; count--; } } else { asm volatile("rep; outs" "w" : "+S"(addr), "+c"(count) : "d"(port) : "memory"); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insw(int port, void *addr, unsigned long count) { if (sev_key_active()) { unsigned short *value = (unsigned short *)addr; while (count) { *value = inw(port); value++; count--; } } else { asm volatile("rep; ins" "w" : "+D"(addr), "+c"(count) : "d"(port) : "memory"); } }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outl(unsigned int value, int port) { asm volatile("out" "l" " %" "" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int inl(int port) { unsigned int value; asm volatile("in" "l" " %w1, %" "" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outl_p(unsigned int value, int port) { outl(value, port); slow_down_io(); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int inl_p(int port) { unsigned int value = inl(port); slow_down_io(); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsl(int port, const void *addr, unsigned long count) { if (sev_key_active()) { unsigned int *value = (unsigned int *)addr; while (count) { outl(*value, port); value++; count--; } } else { asm volatile("rep; outs" "l" : "+S"(addr), "+c"(count) : "d"(port) : "memory"); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insl(int port, void *addr, unsigned long count) { if (sev_key_active()) { unsigned int *value = (unsigned int *)addr; while (count) { *value = inl(port); value++; count--; } } else { asm volatile("rep; ins" "l" : "+D"(addr), "+c"(count) : "d"(port) : "memory"); } }
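/*
 * Editor's note (illustrative sketch, not part of the preprocessed output):
 * the three long lines above are the expanded BUILDIO() port-I/O families:
 * outb/outw/outl and inb/inw/inl wrap single "out"/"in" instructions, the
 * *_p variants add a slow_down_io() delay, and the string forms
 * (outsb/insb, ...) use "rep outs"/"rep ins" unless sev_key_active() forces
 * an element-at-a-time fallback loop. The simplified example below touches
 * the classic RTC/CMOS index/data ports (0x70/0x71) and is hypothetical;
 * real code would also serialize access and preserve the NMI-disable bit.
 */
static inline __attribute__((__unused__)) unsigned char example_read_cmos_reg(unsigned char idx)
{
	outb(idx, 0x70);	/* select CMOS register */
	return inb(0x71);	/* read its value */
}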
# 366 "./arch/x86/include/asm/io.h"
extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
enum page_cache_mode pcm);
extern void *ioremap_wc(resource_size_t offset, unsigned long size);
extern void *ioremap_wt(resource_size_t offset, unsigned long size);
extern bool is_early_ioremap_ptep(pte_t *ptep);
# 1 "./include/asm-generic/io.h" 1
# 324 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void readsb(const volatile void *addr, void *buffer,
unsigned int count)
{
if (count) {
u8 *buf = buffer;
do {
u8 x = __readb(addr);
*buf++ = x;
} while (--count);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void readsw(const volatile void *addr, void *buffer,
unsigned int count)
{
if (count) {
u16 *buf = buffer;
do {
u16 x = __readw(addr);
*buf++ = x;
} while (--count);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void readsl(const volatile void *addr, void *buffer,
unsigned int count)
{
if (count) {
u32 *buf = buffer;
do {
u32 x = __readl(addr);
*buf++ = x;
} while (--count);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void readsq(const volatile void *addr, void *buffer,
unsigned int count)
{
if (count) {
u64 *buf = buffer;
do {
u64 x = __readq(addr);
*buf++ = x;
} while (--count);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writesb(volatile void *addr, const void *buffer,
unsigned int count)
{
if (count) {
const u8 *buf = buffer;
do {
__writeb(*buf++, addr);
} while (--count);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writesw(volatile void *addr, const void *buffer,
unsigned int count)
{
if (count) {
const u16 *buf = buffer;
do {
__writew(*buf++, addr);
} while (--count);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writesl(volatile void *addr, const void *buffer,
unsigned int count)
{
if (count) {
const u32 *buf = buffer;
do {
__writel(*buf++, addr);
} while (--count);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writesq(volatile void *addr, const void *buffer,
unsigned int count)
{
if (count) {
const u64 *buf = buffer;
do {
__writeq(*buf++, addr);
} while (--count);
}
}
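/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * readsl()/writesl() repeatedly access the *same* MMIO location and step
 * through the CPU-side buffer, which is the usual way to drain or fill a
 * device FIFO exposed as a single data register. The helper name below is
 * hypothetical.
 */
static inline __attribute__((__unused__)) void example_copy_from_fifo(const volatile void *fifo_reg, u32 *dst, unsigned int words)
{
readsl(fifo_reg, dst, words); /* one 32-bit read per element */
}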
# 458 "./include/asm-generic/io.h"
# 1 "./include/linux/logic_pio.h" 1
# 11 "./include/linux/logic_pio.h"
# 1 "./include/linux/fwnode.h" 1
# 17 "./include/linux/fwnode.h"
struct fwnode_operations;
struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
};
struct fwnode_endpoint {
unsigned int port;
unsigned int id;
const struct fwnode_handle *local_fwnode;
};
# 45 "./include/linux/fwnode.h"
struct fwnode_reference_args {
struct fwnode_handle *fwnode;
unsigned int nargs;
u64 args[8];
};
# 72 "./include/linux/fwnode.h"
struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
const struct device *dev);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval);
int
(*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
const char *propname, const char **val,
size_t nval);
struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*get_next_child_node)(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
struct fwnode_handle *
(*get_named_child_node)(const struct fwnode_handle *fwnode,
const char *name);
int (*get_reference_args)(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args);
struct fwnode_handle *
(*graph_get_next_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev);
struct fwnode_handle *
(*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
};
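/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * struct fwnode_operations is a vtable that firmware-node providers (DT,
 * ACPI, software nodes) fill in; consumers reach it through the generic
 * fwnode/property API. A minimal provider might look like this -- the
 * names are hypothetical and only .property_present is implemented.
 */
static bool example_fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname)
{
return false; /* a real provider would look the property up */
}
static const struct fwnode_operations example_fwnode_ops __attribute__((__unused__)) = {
.property_present = example_fwnode_property_present,
};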
# 12 "./include/linux/logic_pio.h" 2
enum {
LOGIC_PIO_INDIRECT,
LOGIC_PIO_CPU_MMIO,
};
struct logic_pio_hwaddr {
struct list_head list;
struct fwnode_handle *fwnode;
resource_size_t hw_start;
resource_size_t io_start;
resource_size_t size;
unsigned long flags;
void *hostdata;
const struct logic_pio_host_ops *ops;
};
struct logic_pio_host_ops {
u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth);
void (*out)(void *hostdata, unsigned long addr, u32 val,
size_t dwidth);
u32 (*ins)(void *hostdata, unsigned long addr, void *buffer,
size_t dwidth, unsigned int count);
void (*outs)(void *hostdata, unsigned long addr, const void *buffer,
size_t dwidth, unsigned int count);
};
# 116 "./include/linux/logic_pio.h"
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
resource_size_t hw_addr, resource_size_t size);
int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
resource_size_t logic_pio_to_hwaddr(unsigned long pio);
unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
# 459 "./include/asm-generic/io.h" 2
# 641 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
insb(addr, buffer, count);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
insw(addr, buffer, count);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
insl(addr, buffer, count);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsb_p(unsigned long addr, const void *buffer,
unsigned int count)
{
outsb(addr, buffer, count);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsw_p(unsigned long addr, const void *buffer,
unsigned int count)
{
outsw(addr, buffer, count);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsl_p(unsigned long addr, const void *buffer,
unsigned int count)
{
outsl(addr, buffer, count);
}
# 894 "./include/asm-generic/io.h"
# 1 "./include/linux/vmalloc.h" 1
# 11 "./include/linux/vmalloc.h"
# 1 "./include/linux/overflow.h" 1
# 247 "./include/linux/overflow.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) size_t array_size(size_t a, size_t b)
{
size_t bytes;
if (({ typeof(a) __a = (a); typeof(b) __b = (b); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))
return (~(size_t)0);
return bytes;
}
# 269 "./include/linux/overflow.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) size_t array3_size(size_t a, size_t b, size_t c)
{
size_t bytes;
if (({ typeof(a) __a = (a); typeof(b) __b = (b); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))
return (~(size_t)0);
if (({ typeof(bytes) __a = (bytes); typeof(c) __b = (c); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))
return (~(size_t)0);
return bytes;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) size_t __ab_c_size(size_t n, size_t size, size_t c)
{
size_t bytes;
if (({ typeof(n) __a = (n); typeof(size) __b = (size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))
return (~(size_t)0);
if (({ typeof(bytes) __a = (bytes); typeof(c) __b = (c); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_add_overflow(__a, __b, __d); }))
return (~(size_t)0);
return bytes;
}
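/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * array_size()/array3_size() return ~(size_t)0 (SIZE_MAX) when the product
 * would overflow, so a downstream allocator sees an impossibly large
 * request and fails instead of handing back a short buffer. Hypothetical
 * helper:
 */
static inline __attribute__((__unused__)) size_t example_matrix_bytes(size_t rows, size_t cols, size_t elem_size)
{
return array3_size(rows, cols, elem_size); /* saturates on overflow */
}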
# 12 "./include/linux/vmalloc.h" 2
struct vm_area_struct;
struct notifier_block;
# 34 "./include/linux/vmalloc.h"
struct vm_struct {
struct vm_struct *next;
void *addr;
unsigned long size;
unsigned long flags;
struct page **pages;
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
};
struct vmap_area {
unsigned long va_start;
unsigned long va_end;
unsigned long flags;
struct rb_node rb_node;
struct list_head list;
struct llist_node purge_list;
struct vm_struct *vm;
struct callback_head callback_head;
};
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
int node, pgprot_t prot);
extern void vm_unmap_aliases(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) vmalloc_init(void);
extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller);
# 93 "./include/linux/vmalloc.h"
extern void *__vmalloc_node_flags_caller(unsigned long size,
int node, gfp_t flags, void *caller);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
unsigned long uaddr, void *kaddr,
unsigned long size);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
void vmalloc_sync_all(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) size_t get_vm_area_size(const struct vm_struct *area)
{
if (!(area->flags & 0x00000040))
return area->size - ((1UL) << 12);
else
return area->size;
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
unsigned long flags,
unsigned long start, unsigned long end,
const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page **pages);
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
# 163 "./include/linux/vmalloc.h"
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
extern struct list_head vmap_area_list;
extern __attribute__((__section__(".init.text"))) __attribute__((__cold__)) void vm_area_add_early(struct vm_struct *vm);
extern __attribute__((__section__(".init.text"))) __attribute__((__cold__)) void vm_area_register_early(struct vm_struct *vm, size_t align);
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align);
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# 206 "./include/linux/vmalloc.h"
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
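/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * vzalloc() gives a zeroed, page-backed, virtually contiguous allocation
 * that must be released with vfree(); combined with array_size() the size
 * computation cannot silently wrap. The helper name is hypothetical.
 */
static inline __attribute__((__unused__)) u64 *example_alloc_table(size_t entries)
{
/* returns ((void *)0) on allocation failure or size overflow */
return vzalloc(array_size(entries, sizeof(u64)));
}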
# 895 "./include/asm-generic/io.h" 2
# 1041 "./include/asm-generic/io.h"
extern void *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void *p);
# 1051 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xlate_dev_kmem_ptr(void *addr)
{
return addr;
}
# 384 "./arch/x86/include/asm/io.h" 2
extern int __attribute__((__warn_unused_result__)) arch_phys_wc_index(int handle);
extern int __attribute__((__warn_unused_result__)) arch_phys_wc_add(unsigned long base,
unsigned long size);
extern void arch_phys_wc_del(int handle);
extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
extern bool arch_memremap_can_ram_remap(resource_size_t offset,
unsigned long size,
unsigned long flags);
extern bool phys_mem_access_encrypted(unsigned long phys_addr,
unsigned long size);
# 16 "./arch/x86/include/asm/realmode.h" 2
struct real_mode_header {
u32 text_start;
u32 ro_end;
u32 trampoline_start;
u32 trampoline_status;
u32 trampoline_header;
u32 trampoline_pgd;
u32 wakeup_start;
u32 wakeup_header;
u32 machine_real_restart_asm;
u32 machine_real_restart_seg;
};
struct trampoline_header {
u64 start;
u64 efer;
u32 cr4;
u32 flags;
};
extern struct real_mode_header *real_mode_header;
extern unsigned char real_mode_blob_end[];
extern unsigned long initial_code;
extern unsigned long initial_gs;
extern unsigned long initial_stack;
extern unsigned char real_mode_blob[];
extern unsigned char real_mode_relocs[];
extern unsigned char secondary_startup_64[];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) size_t real_mode_size_needed(void)
{
if (real_mode_header)
return 0;
return ((((real_mode_blob_end - real_mode_blob)) + ((typeof((real_mode_blob_end - real_mode_blob)))((((1UL) << 12))) - 1)) & ~((typeof((real_mode_blob_end - real_mode_blob)))((((1UL) << 12))) - 1));
}
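/*
 * Editor's note: the expression above is the expansion of
 * ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE), i.e. the
 * page-aligned size of the real-mode trampoline blob; once
 * real_mode_header is set the memory has already been reserved and the
 * function reports that nothing more is needed.
 */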
void set_real_mode_mem(phys_addr_t mem, size_t size);
void reserve_real_mode(void);
# 34 "./arch/x86/include/asm/acpi.h" 2
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern int acpi_disable_cmcff;
extern u8 acpi_sci_flags;
extern u32 acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
struct device;
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity);
extern void (*__acpi_unregister_gsi)(u32 gsi);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void disable_acpi(void)
{
acpi_disabled = 1;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void acpi_disable_pci(void)
{
acpi_pci_disabled = 1;
acpi_noirq_set();
}
extern int (*acpi_suspend_lowlevel)(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
{
if (boot_cpu_data.x86 == 0x0F &&
boot_cpu_data.x86_vendor == 2 &&
boot_cpu_data.x86_model <= 0x05 &&
boot_cpu_data.x86_stepping < 0x0A)
return 1;
else if ((__builtin_constant_p((19*32 + (4))) && ( ((((19*32 + (4)))>>5)==(0) && (1UL<<(((19*32 + (4)))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((19*32 + (4)))>>5)==(1) && (1UL<<(((19*32 + (4)))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((19*32 + (4)))>>5)==(2) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(3) && (1UL<<(((19*32 + (4)))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((19*32 + (4)))>>5)==(4) && (1UL<<(((19*32 + (4)))&31) & (0) )) || ((((19*32 + (4)))>>5)==(5) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(6) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(7) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(8) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(9) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(10) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(11) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(12) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(13) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(14) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(15) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(16) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(17) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(18) && (1UL<<(((19*32 + (4)))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p(((19*32 + (4)))) ? constant_test_bit(((19*32 + (4))), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((19*32 + (4))), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))
return 1;
else
return max_cstate;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_has_acpi_pdc(void)
{
struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(cpu_info)))) *)((&(cpu_info)))); (typeof((typeof(*((&(cpu_info)))) *)((&(cpu_info))))) (__ptr + (((__per_cpu_offset[(0)])))); }); }));
return (c->x86_vendor == 0 ||
c->x86_vendor == 5);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_acpi_set_pdc_bits(u32 *buf)
{
struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(cpu_info)))) *)((&(cpu_info)))); (typeof((typeof(*((&(cpu_info)))) *)((&(cpu_info))))) (__ptr + (((__per_cpu_offset[(0)])))); }); }));
buf[2] |= ((0x0010) | (0x0008) | (0x0002) | (0x0100) | (0x0200));
if ((__builtin_constant_p(( 4*32+ 7)) && ( (((( 4*32+ 7))>>5)==(0) && (1UL<<((( 4*32+ 7))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+ 7))>>5)==(1) && (1UL<<((( 4*32+ 7))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+ 7))>>5)==(2) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(3) && (1UL<<((( 4*32+ 7))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+ 7))>>5)==(4) && (1UL<<((( 4*32+ 7))&31) & (0) )) || (((( 4*32+ 7))>>5)==(5) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(6) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(7) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(8) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(9) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(10) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(11) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(12) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(13) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(14) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(15) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(16) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(17) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(18) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p((( 4*32+ 7))) ? constant_test_bit((( 4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit((( 4*32+ 7)), ((unsigned long *)((c)->x86_capability))))))
buf[2] |= ((0x0008) | (0x0002) | (0x0020) | (0x0800) | (0x0001));
if ((__builtin_constant_p(( 0*32+22)) && ( (((( 0*32+22))>>5)==(0) && (1UL<<((( 0*32+22))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 0*32+22))>>5)==(1) && (1UL<<((( 0*32+22))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 0*32+22))>>5)==(2) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(3) && (1UL<<((( 0*32+22))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 0*32+22))>>5)==(4) && (1UL<<((( 0*32+22))&31) & (0) )) || (((( 0*32+22))>>5)==(5) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(6) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(7) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(8) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(9) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(10) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(11) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(12) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(13) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(14) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(15) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(16) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(17) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(18) && (1UL<<((( 0*32+22))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p((( 0*32+22))) ? constant_test_bit((( 0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit((( 0*32+22)), ((unsigned long *)((c)->x86_capability))))))
buf[2] |= (0x0004);
if (!(__builtin_constant_p(( 4*32+ 3)) && ( (((( 4*32+ 3))>>5)==(0) && (1UL<<((( 4*32+ 3))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+ 3))>>5)==(1) && (1UL<<((( 4*32+ 3))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+ 3))>>5)==(2) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(3) && (1UL<<((( 4*32+ 3))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+ 3))>>5)==(4) && (1UL<<((( 4*32+ 3))&31) & (0) )) || (((( 4*32+ 3))>>5)==(5) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(6) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(7) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(8) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(9) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(10) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(11) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(12) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(13) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(14) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(15) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(16) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(17) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(18) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p((( 4*32+ 3))) ? constant_test_bit((( 4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit((( 4*32+ 3)), ((unsigned long *)((c)->x86_capability))))))
buf[2] &= ~((0x0200));
}
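/*
 * Editor's note: the very long conditionals in the two functions above are
 * the macro expansion of x86 CPU-feature tests (boot_cpu_has()/cpu_has()
 * style checks): each one first tries to resolve the feature bit against
 * the build-time required-feature masks and only falls back to test_bit()
 * on the runtime x86_capability bitmap when that is not possible.
 */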
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool acpi_has_cpu_in_madt(void)
{
return !!acpi_lapic;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 acpi_arch_get_root_pointer(void)
{
return x86_init.acpi.get_root_pointer();
}
void acpi_generic_reduced_hw_init(void);
u64 x86_default_get_root_pointer(void);
# 168 "./arch/x86/include/asm/acpi.h"
extern int x86_acpi_numa_init(void);
# 30 "./arch/x86/include/asm/fixmap.h" 2
# 1 "./arch/x86/include/uapi/asm/vsyscall.h" 1
enum vsyscall_num {
__NR_vgettimeofday,
__NR_vtime,
__NR_vgetcpu,
};
# 37 "./arch/x86/include/asm/fixmap.h" 2
# 76 "./arch/x86/include/asm/fixmap.h"
enum fixed_addresses {
VSYSCALL_PAGE = (((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) - (-10UL << 20)) >> 12,
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
FIX_OHCI1394_BASE,
FIX_APIC_BASE,
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + 128 - 1,
# 106 "./arch/x86/include/asm/fixmap.h"
FIX_TEXT_POKE1,
FIX_TEXT_POKE0,
# 118 "./arch/x86/include/asm/fixmap.h"
__end_of_permanent_fixed_addresses,
# 130 "./arch/x86/include/asm/fixmap.h"
FIX_BTMAP_END =
(__end_of_permanent_fixed_addresses ^
(__end_of_permanent_fixed_addresses + (64 * 8) - 1)) &
-512
? __end_of_permanent_fixed_addresses + (64 * 8) -
(__end_of_permanent_fixed_addresses & ((64 * 8) - 1))
: __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + (64 * 8) - 1,
__end_of_fixed_addresses
};
extern void reserve_top_address(unsigned long reserve);
extern int fixmaps_set;
extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
native_set_fixmap(idx, phys, flags);
}
# 184 "./arch/x86/include/asm/fixmap.h"
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) *early_memremap_encrypted(resource_size_t phys_addr,
unsigned long size);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) *early_memremap_encrypted_wp(resource_size_t phys_addr,
unsigned long size);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) *early_memremap_decrypted(resource_size_t phys_addr,
unsigned long size);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) *early_memremap_decrypted_wp(resource_size_t phys_addr,
unsigned long size);
# 1 "./include/asm-generic/fixmap.h" 1
# 19 "./include/asm-generic/fixmap.h"
# 1 "./include/linux/mm_types.h" 1
# 1 "./include/linux/mm_types_task.h" 1
# 19 "./include/linux/mm_types_task.h"
# 1 "./arch/x86/include/asm/tlbbatch.h" 1
struct arch_tlbflush_unmap_batch {
struct cpumask cpumask;
};
# 20 "./include/linux/mm_types_task.h" 2
# 34 "./include/linux/mm_types_task.h"
struct vmacache {
u64 seqnum;
struct vm_area_struct *vmas[(1U << 2)];
};
enum {
MM_FILEPAGES,
MM_ANONPAGES,
MM_SWAPENTS,
MM_SHMEMPAGES,
NR_MM_COUNTERS
};
struct task_rss_stat {
int events;
int count[NR_MM_COUNTERS];
};
struct mm_rss_stat {
atomic_long_t count[NR_MM_COUNTERS];
};
struct page_frag {
struct page *page;
__u32 offset;
__u32 size;
};
struct tlbflush_unmap_batch {
# 81 "./include/linux/mm_types_task.h"
struct arch_tlbflush_unmap_batch arch;
bool flush_required;
bool writable;
};
# 6 "./include/linux/mm_types.h" 2
# 1 "./include/linux/auxvec.h" 1
# 1 "./include/uapi/linux/auxvec.h" 1
# 1 "./arch/x86/include/uapi/asm/auxvec.h" 1
# 6 "./include/uapi/linux/auxvec.h" 2
# 6 "./include/linux/auxvec.h" 2
# 8 "./include/linux/mm_types.h" 2
# 1 "./include/linux/completion.h" 1
# 12 "./include/linux/completion.h"
# 1 "./include/linux/wait.h" 1
# 12 "./include/linux/wait.h"
# 1 "./include/uapi/linux/wait.h" 1
# 13 "./include/linux/wait.h" 2
typedef struct wait_queue_entry wait_queue_entry_t;
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
# 27 "./include/linux/wait.h"
struct wait_queue_entry {
unsigned int flags;
void *private;
wait_queue_func_t func;
struct list_head entry;
};
struct wait_queue_head {
spinlock_t lock;
struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;
struct task_struct;
# 61 "./include/linux/wait.h"
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
# 79 "./include/linux/wait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
wq_entry->flags = 0;
wq_entry->private = p;
wq_entry->func = default_wake_function;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
wq_entry->flags = 0;
wq_entry->private = ((void *)0);
wq_entry->func = func;
}
# 124 "./include/linux/wait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int waitqueue_active(struct wait_queue_head *wq_head)
{
return !list_empty(&wq_head->head);
}
# 137 "./include/linux/wait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc");
return waitqueue_active(wq_head);
}
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
list_add(&wq_entry->entry, &wq_head->head);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
wq_entry->flags |= 0x01;
__add_wait_queue(wq_head, wq_entry);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
list_add_tail(&wq_entry->entry, &wq_head->head);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
wq_entry->flags |= 0x01;
__add_wait_queue_entry_tail(wq_head, wq_entry);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
list_del(&wq_entry->entry);
}
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
# 232 "./include/linux/wait.h"
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
# 716 "./include/linux/wait.h"
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
# 1110 "./include/linux/wait.h"
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
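/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * A waker can use wq_has_sleeper() (which issues the full barrier seen
 * above) to skip the wait-queue spinlock when nobody is sleeping. The
 * helper name is hypothetical; 3 is the numeric value of TASK_NORMAL
 * (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE), written out here because
 * macros have already been expanded in this listing.
 */
static inline __attribute__((__unused__)) void example_wake_one(struct wait_queue_head *wq_head)
{
if (wq_has_sleeper(wq_head))
__wake_up(wq_head, 3, 1, ((void *)0));
}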
# 13 "./include/linux/completion.h" 2
# 26 "./include/linux/completion.h"
struct completion {
unsigned int done;
wait_queue_head_t wait;
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void complete_acquire(struct completion *x) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void complete_release(struct completion *x) {}
# 85 "./include/linux/completion.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __init_completion(struct completion *x)
{
x->done = 0;
do { static struct lock_class_key __key; __init_waitqueue_head((&x->wait), "&x->wait", &__key); } while (0);
}
# 98 "./include/linux/completion.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void reinit_completion(struct completion *x)
{
x->done = 0;
}
extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
unsigned long timeout);
extern long wait_for_completion_interruptible_timeout(
struct completion *x, unsigned long timeout);
extern long wait_for_completion_killable_timeout(
struct completion *x, unsigned long timeout);
extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
# 13 "./include/linux/mm_types.h" 2
# 1 "./include/linux/uprobes.h" 1
# 32 "./include/linux/uprobes.h"
struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;
struct page;
enum uprobe_filter_ctx {
UPROBE_FILTER_REGISTER,
UPROBE_FILTER_UNREGISTER,
UPROBE_FILTER_MMAP,
};
struct uprobe_consumer {
int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
int (*ret_handler)(struct uprobe_consumer *self,
unsigned long func,
struct pt_regs *regs);
bool (*filter)(struct uprobe_consumer *self,
enum uprobe_filter_ctx ctx,
struct mm_struct *mm);
struct uprobe_consumer *next;
};
# 1 "./arch/x86/include/asm/uprobes.h" 1
# 26 "./arch/x86/include/asm/uprobes.h"
# 1 "./include/linux/notifier.h" 1
# 16 "./include/linux/notifier.h"
# 1 "./include/linux/srcu.h" 1
# 35 "./include/linux/srcu.h"
# 1 "./include/linux/rcu_segcblist.h" 1
# 31 "./include/linux/rcu_segcblist.h"
struct rcu_cblist {
struct callback_head *head;
struct callback_head **tail;
long len;
long len_lazy;
};
# 77 "./include/linux/rcu_segcblist.h"
struct rcu_segcblist {
struct callback_head *head;
struct callback_head **tails[4];
unsigned long gp_seq[4];
long len;
long len_lazy;
};
# 36 "./include/linux/srcu.h" 2
struct srcu_struct;
# 54 "./include/linux/srcu.h"
int init_srcu_struct(struct srcu_struct *ssp);
# 1 "./include/linux/srcutree.h" 1
# 27 "./include/linux/srcutree.h"
# 1 "./include/linux/rcu_node_tree.h" 1
# 28 "./include/linux/srcutree.h" 2
struct srcu_node;
struct srcu_struct;
struct srcu_data {
unsigned long srcu_lock_count[2];
unsigned long srcu_unlock_count[2];
spinlock_t lock __attribute__((__aligned__(1 << (6))));
struct rcu_segcblist srcu_cblist;
unsigned long srcu_gp_seq_needed;
unsigned long srcu_gp_seq_needed_exp;
bool srcu_cblist_invoking;
struct delayed_work work;
struct callback_head srcu_barrier_head;
struct srcu_node *mynode;
unsigned long grpmask;
int cpu;
struct srcu_struct *ssp;
};
struct srcu_node {
spinlock_t lock;
unsigned long srcu_have_cbs[4];
unsigned long srcu_data_have_cbs[4];
unsigned long srcu_gp_seq_needed_exp;
struct srcu_node *srcu_parent;
int grplo;
int grphi;
};
struct srcu_struct {
struct srcu_node node[(1 + (((64) + ((16)) - 1) / ((16))))];
struct srcu_node *level[2 + 1];
struct mutex srcu_cb_mutex;
spinlock_t lock;
struct mutex srcu_gp_mutex;
unsigned int srcu_idx;
unsigned long srcu_gp_seq;
unsigned long srcu_gp_seq_needed;
unsigned long srcu_gp_seq_needed_exp;
unsigned long srcu_last_gp_end;
struct srcu_data *sda;
unsigned long srcu_barrier_seq;
struct mutex srcu_barrier_mutex;
struct completion srcu_barrier_completion;
atomic_t srcu_barrier_cpu_cnt;
struct delayed_work work;
};
# 141 "./include/linux/srcutree.h"
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
# 63 "./include/linux/srcu.h" 2
void call_srcu(struct srcu_struct *ssp, struct callback_head *head,
void (*func)(struct callback_head *head));
void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
int __srcu_read_lock(struct srcu_struct *ssp) ;
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) ;
void synchronize_srcu(struct srcu_struct *ssp);
# 84 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cleanup_srcu_struct(struct srcu_struct *ssp)
{
_cleanup_srcu_struct(ssp, false);
}
# 106 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
{
_cleanup_srcu_struct(ssp, true);
}
# 138 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int srcu_read_lock_held(const struct srcu_struct *ssp)
{
return 1;
}
# 197 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int srcu_read_lock(struct srcu_struct *ssp)
{
int retval;
retval = __srcu_read_lock(ssp);
do { } while (0);
return retval;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int
srcu_read_lock_notrace(struct srcu_struct *ssp)
{
int retval;
retval = __srcu_read_lock(ssp);
return retval;
}
# 223 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
do { } while (0);
__srcu_read_unlock(ssp, idx);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx)
{
__srcu_read_unlock(ssp, idx);
}
# 246 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_mb__after_srcu_read_unlock(void)
{
}
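/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * SRCU readers bracket their critical section with srcu_read_lock() /
 * srcu_read_unlock() and must pass the returned index back on unlock;
 * updaters later wait for all such readers with synchronize_srcu().
 * The helper name is hypothetical.
 */
static inline __attribute__((__unused__)) void example_srcu_reader(struct srcu_struct *ssp)
{
int idx = srcu_read_lock(ssp);
/* ... dereference data published for SRCU readers ... */
srcu_read_unlock(ssp, idx);
}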
# 17 "./include/linux/notifier.h" 2
# 49 "./include/linux/notifier.h"
struct notifier_block;
typedef int (*notifier_fn_t)(struct notifier_block *nb,
unsigned long action, void *data);
struct notifier_block {
notifier_fn_t notifier_call;
struct notifier_block *next;
int priority;
};
struct atomic_notifier_head {
spinlock_t lock;
struct notifier_block *head;
};
struct blocking_notifier_head {
struct rw_semaphore rwsem;
struct notifier_block *head;
};
struct raw_notifier_head {
struct notifier_block *head;
};
struct srcu_notifier_head {
struct mutex mutex;
struct srcu_struct srcu;
struct notifier_block *head;
};
# 93 "./include/linux/notifier.h"
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
# 144 "./include/linux/notifier.h"
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
struct notifier_block *nb);
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_cond_register(
struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
struct notifier_block *nb);
extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
struct notifier_block *nb);
extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
# 194 "./include/linux/notifier.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int notifier_from_errno(int err)
{
if (err)
return 0x8000 | (0x0001 - err);
return 0x0001;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int notifier_to_errno(int ret)
{
ret &= ~0x8000;
return ret > 0x0001 ? 0x0001 - ret : 0;
}
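/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * Notifier callbacks report errors by encoding an errno with
 * notifier_from_errno(): 0 maps to 0x0001 (NOTIFY_OK), a negative errno to
 * 0x8000 | (0x0001 - err), i.e. NOTIFY_STOP_MASK plus the encoded error.
 * The callback below is hypothetical.
 */
static __attribute__((__unused__)) int example_notifier_call(struct notifier_block *nb, unsigned long action, void *data)
{
return notifier_from_errno(0); /* i.e. NOTIFY_OK */
}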
# 238 "./include/linux/notifier.h"
extern struct blocking_notifier_head reboot_notifier_list;
# 27 "./arch/x86/include/asm/uprobes.h" 2
typedef u8 uprobe_opcode_t;
struct uprobe_xol_ops;
struct arch_uprobe {
union {
u8 insn[16];
u8 ixol[16];
};
const struct uprobe_xol_ops *ops;
union {
struct {
s32 offs;
u8 ilen;
u8 opc1;
} branch;
struct {
u8 fixups;
u8 ilen;
} defparam;
struct {
u8 reg_offset;
u8 ilen;
} push;
};
};
struct arch_uprobe_task {
unsigned long saved_scratch_register;
unsigned int saved_trap_nr;
unsigned int saved_tf;
};
# 63 "./include/linux/uprobes.h" 2
enum uprobe_task_state {
UTASK_RUNNING,
UTASK_SSTEP,
UTASK_SSTEP_ACK,
UTASK_SSTEP_TRAPPED,
};
struct uprobe_task {
enum uprobe_task_state state;
union {
struct {
struct arch_uprobe_task autask;
unsigned long vaddr;
};
struct {
struct callback_head dup_xol_work;
unsigned long dup_xol_addr;
};
};
struct uprobe *active_uprobe;
unsigned long xol_vaddr;
struct return_instance *return_instances;
unsigned int depth;
};
struct return_instance {
struct uprobe *uprobe;
unsigned long func;
unsigned long stack;
unsigned long orig_ret_vaddr;
bool chained;
struct return_instance *next;
};
enum rp_check {
RP_CHECK_CALL,
RP_CHECK_CHAIN_CALL,
RP_CHECK_RET,
};
struct xol_area;
struct uprobes_state {
struct xol_area *xol_area;
};
extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
extern bool uprobe_deny_signal(void);
extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
extern void uprobe_clear_state(struct mm_struct *mm);
extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
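/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * A uprobe client fills in a struct uprobe_consumer and hands it to
 * uprobe_register(inode, offset, &consumer); the handler then runs when a
 * task hits the probed instruction. Names are hypothetical, and returning
 * 0 keeps the probe installed for this consumer.
 */
static int example_uprobe_hit(struct uprobe_consumer *self, struct pt_regs *regs)
{
return 0;
}
static struct uprobe_consumer example_uprobe_consumer __attribute__((__unused__)) = {
.handler = example_uprobe_hit,
};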
# 15 "./include/linux/mm_types.h" 2
# 1 "./include/linux/page-flags-layout.h" 1
# 1 "./include/generated/bounds.h" 1
# 7 "./include/linux/page-flags-layout.h" 2
# 16 "./include/linux/mm_types.h" 2
# 25 "./include/linux/mm_types.h"
typedef int vm_fault_t;
struct address_space;
struct mem_cgroup;
struct hmm;
# 70 "./include/linux/mm_types.h"
struct page {
unsigned long flags;
union {
struct {
struct list_head lru;
struct address_space *mapping;
unsigned long index;
unsigned long private;
};
struct {
union {
struct list_head slab_list;
struct {
struct page *next;
int pages;
int pobjects;
};
};
struct kmem_cache *slab_cache;
void *freelist;
union {
void *s_mem;
unsigned long counters;
struct {
unsigned inuse:16;
unsigned objects:15;
unsigned frozen:1;
};
};
};
struct {
unsigned long compound_head;
unsigned char compound_dtor;
unsigned char compound_order;
atomic_t compound_mapcount;
};
struct {
unsigned long _compound_pad_1;
unsigned long _compound_pad_2;
struct list_head deferred_list;
};
struct {
unsigned long _pt_pad_1;
pgtable_t pmd_huge_pte;
unsigned long _pt_pad_2;
union {
struct mm_struct *pt_mm;
atomic_t pt_frag_refcount;
};
spinlock_t ptl;
};
struct {
struct dev_pagemap *pgmap;
unsigned long hmm_data;
unsigned long _zd_pad_1;
};
struct callback_head callback_head;
};
union {
atomic_t _mapcount;
unsigned int page_type;
unsigned int active;
int units;
};
atomic_t _refcount;
# 207 "./include/linux/mm_types.h"
} __attribute__((__aligned__(2 * sizeof(unsigned long))));
# 217 "./include/linux/mm_types.h"
struct page_frag_cache {
void * va;
__u16 offset;
__u16 size;
unsigned int pagecnt_bias;
bool pfmemalloc;
};
typedef unsigned long vm_flags_t;
struct vm_region {
struct rb_node vm_rb;
vm_flags_t vm_flags;
unsigned long vm_start;
unsigned long vm_end;
unsigned long vm_top;
unsigned long vm_pgoff;
struct file *vm_file;
int vm_usage;
bool vm_icache_flushed : 1;
};
# 260 "./include/linux/mm_types.h"
struct vm_userfaultfd_ctx {};
# 269 "./include/linux/mm_types.h"
struct vm_area_struct {
unsigned long vm_start;
unsigned long vm_end;
struct vm_area_struct *vm_next, *vm_prev;
struct rb_node vm_rb;
unsigned long rb_subtree_gap;
struct mm_struct *vm_mm;
pgprot_t vm_page_prot;
unsigned long vm_flags;
struct {
struct rb_node rb;
unsigned long rb_subtree_last;
} shared;
struct list_head anon_vma_chain;
struct anon_vma *anon_vma;
const struct vm_operations_struct *vm_ops;
unsigned long vm_pgoff;
struct file * vm_file;
void * vm_private_data;
atomic_long_t swap_readahead_info;
struct mempolicy *vm_policy;
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} ;
struct core_thread {
struct task_struct *task;
struct core_thread *next;
};
struct core_state {
atomic_t nr_threads;
struct core_thread dumper;
struct completion startup;
};
struct kioctx_table;
struct mm_struct {
struct {
struct vm_area_struct *mmap;
struct rb_root mm_rb;
u64 vmacache_seqnum;
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
unsigned long mmap_base;
unsigned long mmap_legacy_base;
unsigned long mmap_compat_base;
unsigned long mmap_compat_legacy_base;
unsigned long task_size;
unsigned long highest_vm_end;
pgd_t * pgd;
# 375 "./include/linux/mm_types.h"
atomic_t mm_users;
# 384 "./include/linux/mm_types.h"
atomic_t mm_count;
atomic_long_t pgtables_bytes;
int map_count;
spinlock_t page_table_lock;
struct rw_semaphore mmap_sem;
struct list_head mmlist;
unsigned long hiwater_rss;
unsigned long hiwater_vm;
unsigned long total_vm;
unsigned long locked_vm;
unsigned long pinned_vm;
unsigned long data_vm;
unsigned long exec_vm;
unsigned long stack_vm;
unsigned long def_flags;
spinlock_t arg_lock;
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long saved_auxv[(2*(2 + 20 + 1))];
struct mm_rss_stat rss_stat;
struct linux_binfmt *binfmt;
mm_context_t context;
unsigned long flags;
struct core_state *core_state;
atomic_t membarrier_state;
spinlock_t ioctx_lock;
struct kioctx_table *ioctx_table;
# 455 "./include/linux/mm_types.h"
struct user_namespace *user_ns;
struct file *exe_file;
struct mmu_notifier_mm *mmu_notifier_mm;
# 484 "./include/linux/mm_types.h"
atomic_t tlb_flush_pending;
bool tlb_flush_batched;
struct uprobes_state uprobes_state;
atomic_long_t hugetlb_usage;
struct work_struct async_put_work;
} ;
unsigned long cpu_bitmap[];
};
extern struct mm_struct init_mm;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_init_cpumask(struct mm_struct *mm)
{
unsigned long cpu_bitmap = (unsigned long)mm;
cpu_bitmap += __builtin_offsetof(struct mm_struct, cpu_bitmap);
cpumask_clear((struct cpumask *)cpu_bitmap);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) cpumask_t *mm_cpumask(struct mm_struct *mm)
{
return (struct cpumask *)&mm->cpu_bitmap;
}
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_tlb_flush_pending(struct mm_struct *mm)
{
atomic_set(&mm->tlb_flush_pending, 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inc_tlb_flush_pending(struct mm_struct *mm)
{
atomic_inc(&mm->tlb_flush_pending);
# 575 "./include/linux/mm_types.h"
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dec_tlb_flush_pending(struct mm_struct *mm)
{
# 587 "./include/linux/mm_types.h"
atomic_dec(&mm->tlb_flush_pending);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mm_tlb_flush_pending(struct mm_struct *mm)
{
# 600 "./include/linux/mm_types.h"
return atomic_read(&mm->tlb_flush_pending);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mm_tlb_flush_nested(struct mm_struct *mm)
{
return atomic_read(&mm->tlb_flush_pending) > 1;
}
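/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * Code that clears PTEs and defers the TLB flush brackets the window with
 * inc_tlb_flush_pending()/dec_tlb_flush_pending(), so that readers such as
 * the fault path can use mm_tlb_flush_pending() as a hint that a flush is
 * still outstanding. The helper name is hypothetical.
 */
static inline __attribute__((__unused__)) void example_deferred_flush_window(struct mm_struct *mm)
{
inc_tlb_flush_pending(mm);
/* ... unmap pages / clear PTEs, queue the flush ... */
dec_tlb_flush_pending(mm);
}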
struct vm_fault;
struct vm_special_mapping {
const char *name;
struct page **pages;
vm_fault_t (*fault)(const struct vm_special_mapping *sm,
struct vm_area_struct *vma,
struct vm_fault *vmf);
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
};
enum tlb_flush_reason {
TLB_FLUSH_ON_TASK_SWITCH,
TLB_REMOTE_SHOOTDOWN,
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
TLB_REMOTE_SEND_IPI,
NR_TLB_FLUSH_REASONS,
};
typedef struct {
unsigned long val;
} swp_entry_t;
# 20 "./include/asm-generic/fixmap.h" 2
# 30 "./include/asm-generic/fixmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long fix_to_virt(const unsigned int idx)
{
do { extern void __compiletime_assert_32(void) ; if (!(!(idx >= __end_of_fixed_addresses))) __compiletime_assert_32(); } while (0);
return (((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) - ((idx) << 12));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long virt_to_fix(const unsigned long vaddr)
{
do { if (__builtin_expect(!!(vaddr >= ((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) || vaddr < (((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) - (__end_of_permanent_fixed_addresses << 12))), 0)) do { do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("./include/asm-generic/fixmap.h"), "i" (38), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (13)); }); __builtin_unreachable(); } while (0); } while (0); } while (0);
return ((((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) - ((vaddr)&(~(((1UL) << 12)-1)))) >> 12);
}
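/*
 * Editor's note -- illustrative sketch, not part of the preprocessed unit.
 * fix_to_virt() turns a compile-time fixmap index into its fixed virtual
 * address (the compile-time assertion expansion above rejects out-of-range
 * indices); this is exactly how the local APIC MMIO base is obtained
 * further below. The helper name is hypothetical.
 */
static inline __attribute__((__unused__)) void *example_apic_mmio_base(void)
{
return (void *)fix_to_virt(FIX_APIC_BASE);
}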
# 194 "./arch/x86/include/asm/fixmap.h" 2
void __early_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
# 11 "./arch/x86/include/asm/apic.h" 2
# 1 "./arch/x86/include/asm/hardirq.h" 1
typedef struct {
u16 __softirq_pending;
unsigned int __nmi_count;
unsigned int apic_timer_irqs;
unsigned int irq_spurious_count;
unsigned int icr_read_retry_count;
unsigned int kvm_posted_intr_ipis;
unsigned int kvm_posted_intr_wakeup_ipis;
unsigned int kvm_posted_intr_nested_ipis;
unsigned int x86_platform_ipis;
unsigned int apic_perf_irqs;
unsigned int apic_irq_work_irqs;
unsigned int irq_resched_count;
unsigned int irq_call_count;
unsigned int irq_tlb_count;
unsigned int irq_thermal_count;
unsigned int irq_threshold_count;
unsigned int irq_deferred_error_count;
# 47 "./arch/x86/include/asm/hardirq.h"
} __attribute__((__aligned__((1 << (6))))) irq_cpustat_t;
extern __attribute__((section(".data..percpu" "..shared_aligned"))) __typeof__(irq_cpustat_t) irq_stat __attribute__((__aligned__((1 << (6)))));
extern void ack_bad_irq(unsigned int irq);
extern u64 arch_irq_stat_cpu(unsigned int cpu);
extern u64 arch_irq_stat(void);
# 80 "./arch/x86/include/asm/hardirq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kvm_set_cpu_l1tf_flush_l1d(void) { }
# 14 "./arch/x86/include/asm/apic.h" 2
# 44 "./arch/x86/include/asm/apic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void generic_apic_probe(void)
{
}
extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
extern unsigned int lapic_timer_frequency;
extern enum apic_intr_mode_id apic_intr_mode;
enum apic_intr_mode_id {
APIC_PIC,
APIC_VIRTUAL_WIRE,
APIC_VIRTUAL_WIRE_NO_CONFIG,
APIC_SYMMETRIC_IO,
APIC_SYMMETRIC_IO_NO_ROUTING
};
extern void __inquire_remote_apic(int apicid);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void default_inquire_remote_apic(int apicid)
{
if (apic_verbosity >= 2)
__inquire_remote_apic(apicid);
}
# 88 "./arch/x86/include/asm/apic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool apic_from_smp_config(void)
{
return smp_found_config && !disable_apic;
}
# 100 "./arch/x86/include/asm/apic.h"
extern int setup_profiling_timer(unsigned int);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg);
asm volatile ("661:\n\t" "movl %0, %P1" "\n662:\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "(19*32 + (5))" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "664""1"":\n\t" "xchgl %0, %P1" "\n" "665""1" ":\n\t" ".popsection\n" : "=r" (v), "=m" (*addr) : "i" (0), "0" (v), "m" (*addr));
}
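/*
 * The asm volatile above, with its .altinstructions and
 * .altinstr_replacement sections, looks like the expansion of the
 * alternative() runtime-patching macro: the default "movl %0, %P1" is
 * patched to "xchgl %0, %P1" on CPUs with bug bit 19*32+5 set
 * (presumably X86_BUG_11AP).
 */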
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg));
}
extern void native_apic_wait_icr_idle(void);
extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool apic_is_x2apic_enabled(void)
{
u64 msr;
if (rdmsrl_safe(0x0000001b, &msr))
return false;
return msr & (1UL << 10);
}
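/*
 * 0x0000001b is MSR_IA32_APICBASE and (1UL << 10) is the X2APIC_ENABLE
 * bit, so this helper reports whether x2APIC mode is enabled.
 */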
extern void enable_IR_x2apic(void);
extern int get_physical_broadcast(void);
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void apic_intr_mode_init(void);
extern void init_apic_mappings(void);
void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern void lapic_update_tsc_freq(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int apic_force_enable(unsigned long addr)
{
return -1;
}
extern void apic_bsp_setup(bool upmode);
extern void apic_ap_setup(void);
extern int apic_is_clustered_box(void);
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
extern void lapic_assign_system_vectors(void);
extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
extern void lapic_online(void);
extern void lapic_offline(void);
# 264 "./arch/x86/include/asm/apic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_x2apic(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void x2apic_setup(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int x2apic_enabled(void) { return 0; }
struct irq_data;
# 284 "./arch/x86/include/asm/apic.h"
struct apic {
void (*eoi_write)(u32 reg, u32 v);
void (*native_eoi_write)(u32 reg, u32 v);
void (*write)(u32 reg, u32 v);
u32 (*read)(u32 reg);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
void (*send_IPI)(int cpu, int vector);
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
void (*send_IPI_mask_allbutself)(const struct cpumask *msk, int vec);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
u32 dest_logical;
u32 disable_esr;
u32 irq_delivery_mode;
u32 irq_dest_mode;
u32 (*calc_dest_apicid)(unsigned int cpu);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
int (*probe)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
int (*apic_id_valid)(u32 apicid);
int (*apic_id_registered)(void);
bool (*check_apicid_used)(physid_mask_t *map, int apicid);
void (*init_apic_ldr)(void);
void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
void (*setup_apic_routing)(void);
int (*cpu_present_to_apicid)(int mps_cpu);
void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
int (*check_phys_apicid_present)(int phys_apicid);
int (*phys_pkg_id)(int cpuid_apic, int index_msb);
u32 (*get_apic_id)(unsigned long x);
u32 (*set_apic_id)(unsigned int id);
int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
void (*inquire_remote_apic)(int apicid);
# 350 "./arch/x86/include/asm/apic.h"
char *name;
};
extern struct apic *apic;
# 378 "./arch/x86/include/asm/apic.h"
extern struct apic *__apicdrivers[], *__apicdrivers_end[];
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
extern int lapic_can_unplug_cpu(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 apic_read(u32 reg)
{
return apic->read(reg);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void apic_write(u32 reg, u32 val)
{
apic->write(reg, val);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void apic_eoi(void)
{
apic->eoi_write(0xB0, 0x0);
}
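/* 0xB0 is the APIC_EOI register offset; writing 0 (APIC_EOI_ACK) acknowledges the interrupt. */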
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 apic_icr_read(void)
{
return apic->icr_read();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void apic_icr_write(u32 low, u32 high)
{
apic->icr_write(low, high);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void apic_wait_icr_idle(void)
{
apic->wait_icr_idle();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 safe_apic_wait_icr_idle(void)
{
return apic->safe_wait_icr_idle();
}
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));
# 440 "./arch/x86/include/asm/apic.h"
extern void apic_ack_irq(struct irq_data *data);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ack_APIC_irq(void)
{
apic_eoi();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned default_get_apic_id(unsigned long x)
{
unsigned int ver = ((apic_read(0x30)) & 0xFFu);
if (((ver) >= 0x14) || (__builtin_constant_p(( 3*32+26)) && ( (((( 3*32+26))>>5)==(0) && (1UL<<((( 3*32+26))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 3*32+26))>>5)==(1) && (1UL<<((( 3*32+26))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 3*32+26))>>5)==(2) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(3) && (1UL<<((( 3*32+26))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 3*32+26))>>5)==(4) && (1UL<<((( 3*32+26))&31) & (0) )) || (((( 3*32+26))>>5)==(5) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(6) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(7) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(8) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(9) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(10) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(11) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(12) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(13) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(14) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(15) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(16) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(17) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(18) && (1UL<<((( 3*32+26))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p((( 3*32+26))) ? constant_test_bit((( 3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit((( 3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))
return (x >> 24) & 0xFF;
else
return (x >> 24) & 0x0F;
}
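/*
 * The huge constant expression in the if () above is the inline
 * expansion of a CPU feature test (cpu_has()/boot_cpu_has()) for bit
 * 3*32+26, which should be X86_FEATURE_EXTD_APICID, OR-ed with the
 * xAPIC version check (ver >= 0x14).
 */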
# 468 "./arch/x86/include/asm/apic.h"
extern void apic_send_IPI_self(int vector);
extern __attribute__((section(".data..percpu" ""))) __typeof__(int) x2apic_extra_bits;
extern void generic_bigsmp_probe(void);
# 1 "./arch/x86/include/asm/smp.h" 1
# 478 "./arch/x86/include/asm/apic.h" 2
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[];
extern struct apic apic_noop;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int read_apic_id(void)
{
unsigned int reg = apic_read(0x20);
return apic->get_apic_id(reg);
}
extern int default_apic_id_valid(u32 apicid);
extern int default_acpi_madt_oem_check(char *, char *);
extern void default_setup_apic_routing(void);
extern u32 apic_default_calc_apicid(unsigned int cpu);
extern u32 apic_flat_calc_apicid(unsigned int cpu);
extern bool default_check_apicid_used(physid_mask_t *map, int apicid);
extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap);
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
bool apic_id_is_primary_thread(unsigned int id);
extern void irq_enter(void);
extern void irq_exit(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void entering_irq(void)
{
irq_enter();
kvm_set_cpu_l1tf_flush_l1d();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void entering_ack_irq(void)
{
entering_irq();
ack_APIC_irq();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ipi_entering_ack_irq(void)
{
irq_enter();
ack_APIC_irq();
kvm_set_cpu_l1tf_flush_l1d();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void exiting_irq(void)
{
irq_exit();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void exiting_ack_irq(void)
{
ack_APIC_irq();
irq_exit();
}
extern void ioapic_zap_locks(void);
# 14 "./arch/x86/include/asm/smp.h" 2
# 1 "./arch/x86/include/asm/io_apic.h" 1
# 1 "./arch/x86/include/asm/irq_vectors.h" 1
# 9 "./arch/x86/include/asm/io_apic.h" 2
# 28 "./arch/x86/include/asm/io_apic.h"
union IO_APIC_reg_00 {
u32 raw;
struct {
u32 __reserved_2 : 14,
LTS : 1,
delivery_type : 1,
__reserved_1 : 8,
ID : 8;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_01 {
u32 raw;
struct {
u32 version : 8,
__reserved_2 : 7,
PRQ : 1,
entries : 8,
__reserved_1 : 8;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_02 {
u32 raw;
struct {
u32 __reserved_2 : 24,
arbitration : 4,
__reserved_1 : 4;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_03 {
u32 raw;
struct {
u32 boot_DT : 1,
__reserved_1 : 31;
} __attribute__ ((packed)) bits;
};
struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3,
dest_mode : 1,
delivery_status : 1,
polarity : 1,
irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
__u32 __reserved_3 : 24,
dest : 8;
} __attribute__ ((packed));
struct IR_IO_APIC_route_entry {
__u64 vector : 8,
zero : 3,
index2 : 1,
delivery_status : 1,
polarity : 1,
irr : 1,
trigger : 1,
mask : 1,
reserved : 31,
format : 1,
index : 15;
} __attribute__ ((packed));
struct irq_alloc_info;
struct ioapic_domain_cfg;
# 123 "./arch/x86/include/asm/io_apic.h"
extern int nr_ioapics;
extern int mpc_ioapic_id(int ioapic);
extern unsigned int mpc_ioapic_addr(int ioapic);
extern int mp_irq_entries;
extern struct mpc_intsrc mp_irqs[(256 * 4)];
extern int skip_ioapic_setup;
extern int noioapicquirk;
extern int noioapicreroute;
extern u32 gsi_top;
extern unsigned long io_apic_irqs;
# 156 "./arch/x86/include/asm/io_apic.h"
struct irq_cfg;
extern void ioapic_insert_resources(void);
extern int arch_early_ioapic_init(void);
extern int save_ioapic_entries(void);
extern void mask_ioapic_entries(void);
extern int restore_ioapic_entries(void);
extern void setup_ioapic_ids_from_mpc(void);
extern void setup_ioapic_ids_from_mpc_nocheck(void);
extern int mp_find_ioapic(u32 gsi);
extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags,
struct irq_alloc_info *info);
extern void mp_unmap_irq(int irq);
extern int mp_register_ioapic(int id, u32 address, u32 gsi_base,
struct ioapic_domain_cfg *cfg);
extern int mp_unregister_ioapic(u32 gsi_base);
extern int mp_ioapic_registered(u32 gsi_base);
extern void ioapic_set_alloc_attr(struct irq_alloc_info *info,
int node, int trigger, int polarity);
extern void mp_save_irq(struct mpc_intsrc *m);
extern void disable_ioapic_support(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) io_apic_init_mappings(void);
extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
extern void native_restore_boot_irq_mode(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
return x86_apic_ops.io_apic_read(apic, reg);
}
extern void setup_IO_APIC(void);
extern void enable_IO_APIC(void);
extern void clear_IO_APIC(void);
extern void restore_boot_irq_mode(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin);
extern void print_IO_APICs(void);
# 16 "./arch/x86/include/asm/smp.h" 2
extern int smp_num_siblings;
extern unsigned int num_processors;
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(cpumask_var_t) cpu_sibling_map;
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(cpumask_var_t) cpu_core_map;
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(cpumask_var_t) cpu_llc_shared_map;
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u16) cpu_llc_id;
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(int) cpu_number;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct cpumask *cpu_llc_shared_mask(int cpu)
{
return (*({ do { const void *__vpp_verify = (typeof((&(cpu_llc_shared_map)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(cpu_llc_shared_map)))) *)((&(cpu_llc_shared_map)))); (typeof((typeof(*((&(cpu_llc_shared_map)))) *)((&(cpu_llc_shared_map))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }));
}
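/*
 * The ({ ... }) statement expression above is per_cpu(cpu_llc_shared_map, cpu)
 * after expansion: verify the per-cpu pointer type, then offset the
 * variable's address by __per_cpu_offset[cpu].
 */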
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u16) x86_cpu_to_apicid; extern __typeof__(u16) *x86_cpu_to_apicid_early_ptr; extern __typeof__(u16) x86_cpu_to_apicid_early_map[];
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u32) x86_cpu_to_acpiid; extern __typeof__(u32) *x86_cpu_to_acpiid_early_ptr; extern __typeof__(u32) x86_cpu_to_acpiid_early_map[];
extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[];
struct task_struct;
struct smp_ops {
void (*smp_prepare_boot_cpu)(void);
void (*smp_prepare_cpus)(unsigned max_cpus);
void (*smp_cpus_done)(unsigned max_cpus);
void (*stop_other_cpus)(int wait);
void (*crash_stop_other_cpus)(void);
void (*smp_send_reschedule)(int cpu);
int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
void (*play_dead)(void);
void (*send_call_func_ipi)(const struct cpumask *mask);
void (*send_call_func_single_ipi)(int cpu);
};
extern void set_cpu_sibling_map(int cpu);
extern struct smp_ops smp_ops;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_send_stop(void)
{
smp_ops.stop_other_cpus(0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void stop_other_cpus(void)
{
smp_ops.stop_other_cpus(1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_prepare_boot_cpu(void)
{
smp_ops.smp_prepare_boot_cpu();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_prepare_cpus(unsigned int max_cpus)
{
smp_ops.smp_prepare_cpus(max_cpus);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_cpus_done(unsigned int max_cpus)
{
smp_ops.smp_cpus_done(max_cpus);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
return smp_ops.cpu_up(cpu, tidle);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __cpu_disable(void)
{
return smp_ops.cpu_disable();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __cpu_die(unsigned int cpu)
{
smp_ops.cpu_die(cpu);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void play_dead(void)
{
smp_ops.play_dead();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_send_reschedule(int cpu)
{
smp_ops.smp_send_reschedule(cpu);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_send_call_function_single_ipi(int cpu)
{
smp_ops.send_call_func_single_ipi(cpu);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_ops.send_call_func_ipi(mask);
}
void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);
void smp_reboot_interrupt(void);
void smp_reschedule_interrupt(struct pt_regs *regs);
void smp_call_function_interrupt(struct pt_regs *regs);
void smp_call_function_single_interrupt(struct pt_regs *r);
# 182 "./arch/x86/include/asm/smp.h"
extern unsigned disabled_cpus;
extern int hard_smp_processor_id(void);
# 69 "./include/linux/smp.h" 2
# 78 "./include/linux/smp.h"
extern void smp_send_stop(void);
extern void smp_send_reschedule(int cpu);
extern void smp_prepare_cpus(unsigned int max_cpus);
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);
extern void smp_cpus_done(unsigned int max_cpus);
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait);
int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
void smp_prepare_boot_cpu(void);
extern unsigned int setup_max_cpus;
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) setup_nr_cpu_ids(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) smp_init(void);
extern int __boot_cpu_id;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_boot_cpu_id(void)
{
return __boot_cpu_id;
}
# 213 "./include/linux/smp.h"
extern void arch_disable_smp_support(void);
extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);
void smp_setup_processor_id(void);
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
bool phys);
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
# 8 "./include/linux/percpu.h" 2
# 71 "./include/linux/percpu.h"
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;
struct pcpu_group_info {
int nr_units;
unsigned long base_offset;
unsigned int *cpu_map;
};
struct pcpu_alloc_info {
size_t static_size;
size_t reserved_size;
size_t dyn_size;
size_t unit_size;
size_t atom_size;
size_t alloc_size;
size_t __ai_size;
int nr_groups;
struct pcpu_group_info groups[];
};
enum pcpu_fc {
PCPU_FC_AUTO,
PCPU_FC_EMBED,
PCPU_FC_PAGE,
PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
extern struct pcpu_alloc_info * __attribute__((__section__(".init.text"))) __attribute__((__cold__)) pcpu_alloc_alloc_info(int nr_groups,
int nr_units);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
pcpu_fc_alloc_fn_t alloc_fn,
pcpu_fc_free_fn_t free_fn);
extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) pcpu_page_first_chunk(size_t reserved_size,
pcpu_fc_alloc_fn_t alloc_fn,
pcpu_fc_free_fn_t free_fn,
pcpu_fc_populate_pte_fn_t populate_pte_fn);
extern void *__alloc_reserved_percpu(size_t size, size_t align);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
extern void *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
# 152 "./include/linux/percpu.h"
extern unsigned long pcpu_nr_pages(void);
# 20 "./include/linux/hrtimer.h" 2
# 1 "./include/linux/timerqueue.h" 1
struct timerqueue_node {
struct rb_node node;
ktime_t expires;
};
struct timerqueue_head {
struct rb_root head;
struct timerqueue_node *next;
};
extern bool timerqueue_add(struct timerqueue_head *head,
struct timerqueue_node *node);
extern bool timerqueue_del(struct timerqueue_head *head,
struct timerqueue_node *node);
extern struct timerqueue_node *timerqueue_iterate_next(
struct timerqueue_node *node);
# 35 "./include/linux/timerqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function))
struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
{
return head->next;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void timerqueue_init(struct timerqueue_node *node)
{
((&node->node)->__rb_parent_color = (unsigned long)(&node->node));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void timerqueue_init_head(struct timerqueue_head *head)
{
head->head = (struct rb_root) { ((void *)0), };
head->next = ((void *)0);
}
# 22 "./include/linux/hrtimer.h" 2
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
# 36 "./include/linux/hrtimer.h"
enum hrtimer_mode {
HRTIMER_MODE_ABS = 0x00,
HRTIMER_MODE_REL = 0x01,
HRTIMER_MODE_PINNED = 0x02,
HRTIMER_MODE_SOFT = 0x04,
HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
};
enum hrtimer_restart {
HRTIMER_NORESTART,
HRTIMER_RESTART,
};
# 107 "./include/linux/hrtimer.h"
struct hrtimer {
struct timerqueue_node node;
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
u8 is_soft;
};
# 124 "./include/linux/hrtimer.h"
struct hrtimer_sleeper {
struct hrtimer timer;
struct task_struct *task;
};
# 147 "./include/linux/hrtimer.h"
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
unsigned int index;
clockid_t clockid;
seqcount_t seq;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
ktime_t offset;
} __attribute__((__aligned__((1 << (6)))));
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
HRTIMER_BASE_MONOTONIC_SOFT,
HRTIMER_BASE_REALTIME_SOFT,
HRTIMER_BASE_BOOTTIME_SOFT,
HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
# 198 "./include/linux/hrtimer.h"
struct hrtimer_cpu_base {
raw_spinlock_t lock;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
unsigned int hres_active : 1,
in_hrtirq : 1,
hang_detected : 1,
softirq_activated : 1;
unsigned int nr_events;
unsigned short nr_retries;
unsigned short nr_hangs;
unsigned int max_hang_time;
ktime_t expires_next;
struct hrtimer *next_timer;
ktime_t softirq_expires_next;
struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} __attribute__((__aligned__((1 << (6)))));
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = time;
timer->_softexpires = time;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
timer->_softexpires = time;
timer->node.expires = ktime_add_safe(time, delta);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
timer->_softexpires = time;
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
timer->node.expires = tv64;
timer->_softexpires = tv64;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = ktime_add_safe(timer->node.expires, time);
timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
timer->node.expires = ((timer->node.expires) + (ns));
timer->_softexpires = ((timer->_softexpires) + (ns));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
return timer->node.expires;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
return timer->_softexpires;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
return timer->node.expires;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
return timer->_softexpires;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
return ktime_to_ns(timer->node.expires);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
return ((timer->node.expires) - (timer->base->get_time()));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
return timer->base->get_time();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 1 ?
timer->base->cpu_base->hres_active : 0;
}
struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
# 312 "./include/linux/hrtimer.h"
extern void clock_was_set_delayed(void);
extern unsigned int hrtimer_resolution;
# 327 "./include/linux/hrtimer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
ktime_t rem = ((timer->node.expires) - (now));
if (0 && timer->is_rel)
rem -= hrtimer_resolution;
return rem;
}
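/*
 * Two preprocessed config checks above: the "1 ?" in
 * hrtimer_is_hres_active() is IS_ENABLED(CONFIG_HIGH_RES_TIMERS) folded
 * to 1, and "if (0 && timer->is_rel)" is IS_ENABLED(CONFIG_TIME_LOW_RES)
 * folded to 0 in this configuration.
 */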
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
return __hrtimer_expires_remaining_adjusted(timer,
timer->base->get_time());
}
extern void clock_was_set(void);
extern void timerfd_clock_was_set(void);
extern void hrtimers_resume(void);
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tick_device) tick_cpu_device;
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_init_on_stack(struct hrtimer *timer,
clockid_t which_clock,
enum hrtimer_mode mode)
{
hrtimer_init(timer, which_clock, mode);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
# 392 "./include/linux/hrtimer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
{
hrtimer_start_range_ns(timer, tim, 0, mode);
}
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_start_expires(struct hrtimer *timer,
enum hrtimer_mode mode)
{
u64 delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
delta = ktime_to_ns(((hard) - (soft)));
hrtimer_start_range_ns(timer, soft, delta, mode);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_restart(struct hrtimer *timer)
{
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
}
extern u64 hrtimer_get_next_event(void);
extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hrtimer_is_queued(struct hrtimer *timer)
{
return timer->state & 0x01;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hrtimer_callback_running(struct hrtimer *timer)
{
return timer->base->running == timer;
}
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
# 467 "./include/linux/hrtimer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
return hrtimer_forward(timer, timer->base->get_time(), interval);
}
extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
const enum hrtimer_mode mode,
const clockid_t clockid);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
struct task_struct *tsk);
extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
extern void hrtimer_run_queues(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) hrtimers_init(void);
extern void sysrq_timer_list_show(void);
int hrtimers_prepare_cpu(unsigned int cpu);
int hrtimers_dead_cpu(unsigned int cpu);
# 21 "./include/linux/sched.h" 2
# 1 "./include/linux/seccomp.h" 1
# 1 "./include/uapi/linux/seccomp.h" 1
# 59 "./include/uapi/linux/seccomp.h"
struct seccomp_data {
int nr;
__u32 arch;
__u64 instruction_pointer;
__u64 args[6];
};
struct seccomp_notif_sizes {
__u16 seccomp_notif;
__u16 seccomp_notif_resp;
__u16 seccomp_data;
};
struct seccomp_notif {
__u64 id;
__u32 pid;
__u32 flags;
struct seccomp_data data;
};
struct seccomp_notif_resp {
__u64 id;
__s64 val;
__s32 error;
__u32 flags;
};
# 6 "./include/linux/seccomp.h" 2
# 15 "./include/linux/seccomp.h"
# 1 "./arch/x86/include/asm/seccomp.h" 1
# 1 "./arch/x86/include/asm/unistd.h" 1
# 1 "./arch/x86/include/uapi/asm/unistd.h" 1
# 6 "./arch/x86/include/asm/unistd.h" 2
# 24 "./arch/x86/include/asm/unistd.h"
# 1 "./arch/x86/include/generated/uapi/asm/unistd_64.h" 1
# 25 "./arch/x86/include/asm/unistd.h" 2
# 1 "./arch/x86/include/generated/asm/unistd_64_x32.h" 1
# 26 "./arch/x86/include/asm/unistd.h" 2
# 6 "./arch/x86/include/asm/seccomp.h" 2
# 1 "./arch/x86/include/asm/ia32_unistd.h" 1
# 10 "./arch/x86/include/asm/ia32_unistd.h"
# 1 "./arch/x86/include/generated/asm/unistd_32_ia32.h" 1
# 11 "./arch/x86/include/asm/ia32_unistd.h" 2
# 13 "./arch/x86/include/asm/seccomp.h" 2
# 1 "./include/asm-generic/seccomp.h" 1
# 14 "./include/asm-generic/seccomp.h"
# 1 "./include/uapi/linux/unistd.h" 1
# 15 "./include/asm-generic/seccomp.h" 2
# 34 "./include/asm-generic/seccomp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const int *get_compat_mode1_syscalls(void)
{
static const int mode1_syscalls_32[] = {
3, 4,
1, 119,
0,
};
return mode1_syscalls_32;
}
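/*
 * The literals in mode1_syscalls_32[] are the ia32 syscall numbers for
 * read (3), write (4), exit (1) and sigreturn (119), i.e. the
 * __NR_seccomp_*_32 constants, terminated by 0.
 */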
# 20 "./arch/x86/include/asm/seccomp.h" 2
# 16 "./include/linux/seccomp.h" 2
struct seccomp_filter;
# 29 "./include/linux/seccomp.h"
struct seccomp {
int mode;
struct seccomp_filter *filter;
};
extern int __secure_computing(const struct seccomp_data *sd);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int secure_computing(const struct seccomp_data *sd)
{
if (__builtin_expect(!!(test_ti_thread_flag(((struct thread_info *)get_current()), 8)), 0))
return __secure_computing(sd);
return 0;
}
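/*
 * test_ti_thread_flag(..., 8) is the expanded test_thread_flag(TIF_SECCOMP)
 * (bit 8 on x86); current_thread_info() reduces to a cast of
 * get_current() because thread_info is embedded in task_struct here.
 */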
extern long prctl_get_seccomp(void);
extern long prctl_set_seccomp(unsigned long, void *);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int seccomp_mode(struct seccomp *s)
{
return s->mode;
}
# 84 "./include/linux/seccomp.h"
extern void put_seccomp_filter(struct task_struct *tsk);
extern void get_seccomp_filter(struct task_struct *tsk);
# 103 "./include/linux/seccomp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long seccomp_get_filter(struct task_struct *task,
unsigned long n, void *data)
{
return -22;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long seccomp_get_metadata(struct task_struct *task,
unsigned long filter_off,
void *data)
{
return -22;
}
# 22 "./include/linux/sched.h" 2
# 1 "./include/linux/resource.h" 1
# 1 "./include/uapi/linux/resource.h" 1
# 24 "./include/uapi/linux/resource.h"
struct rusage {
struct timeval ru_utime;
struct timeval ru_stime;
__kernel_long_t ru_maxrss;
__kernel_long_t ru_ixrss;
__kernel_long_t ru_idrss;
__kernel_long_t ru_isrss;
__kernel_long_t ru_minflt;
__kernel_long_t ru_majflt;
__kernel_long_t ru_nswap;
__kernel_long_t ru_inblock;
__kernel_long_t ru_oublock;
__kernel_long_t ru_msgsnd;
__kernel_long_t ru_msgrcv;
__kernel_long_t ru_nsignals;
__kernel_long_t ru_nvcsw;
__kernel_long_t ru_nivcsw;
};
struct rlimit {
__kernel_ulong_t rlim_cur;
__kernel_ulong_t rlim_max;
};
struct rlimit64 {
__u64 rlim_cur;
__u64 rlim_max;
};
# 78 "./include/uapi/linux/resource.h"
# 1 "./arch/x86/include/uapi/asm/resource.h" 1
# 1 "./include/asm-generic/resource.h" 1
# 1 "./include/uapi/asm-generic/resource.h" 1
# 6 "./include/asm-generic/resource.h" 2
# 2 "./arch/x86/include/uapi/asm/resource.h" 2
# 79 "./include/uapi/linux/resource.h" 2
# 6 "./include/linux/resource.h" 2
struct task_struct;
void getrusage(struct task_struct *p, int who, struct rusage *ru);
int do_prlimit(struct task_struct *tsk, unsigned int resource,
struct rlimit *new_rlim, struct rlimit *old_rlim);
# 25 "./include/linux/sched.h" 2
# 1 "./include/linux/latencytop.h" 1
# 14 "./include/linux/latencytop.h"
struct task_struct;
# 46 "./include/linux/latencytop.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_all_latency_tracing(struct task_struct *p)
{
}
# 26 "./include/linux/sched.h" 2
# 1 "./include/linux/sched/prio.h" 1
# 48 "./include/linux/sched/prio.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long nice_to_rlimit(long nice)
{
return (19 - nice + 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long rlimit_to_nice(long prio)
{
return (19 - prio + 1);
}
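/* The literal 19 in the two helpers above is MAX_NICE after expansion. */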
# 27 "./include/linux/sched.h" 2
# 1 "./include/linux/signal_types.h" 1
# 10 "./include/linux/signal_types.h"
# 1 "./include/uapi/linux/signal.h" 1
# 1 "./arch/x86/include/asm/signal.h" 1
# 21 "./arch/x86/include/asm/signal.h"
typedef unsigned long old_sigset_t;
typedef struct {
unsigned long sig[(64 / 64)];
} sigset_t;
# 36 "./arch/x86/include/asm/signal.h"
# 1 "./arch/x86/include/uapi/asm/signal.h" 1
# 11 "./arch/x86/include/uapi/asm/signal.h"
struct siginfo;
# 94 "./arch/x86/include/uapi/asm/signal.h"
# 1 "./include/uapi/asm-generic/signal-defs.h" 1
# 18 "./include/uapi/asm-generic/signal-defs.h"
typedef void __signalfn_t(int);
typedef __signalfn_t *__sighandler_t;
typedef void __restorefn_t(void);
typedef __restorefn_t *__sigrestore_t;
# 95 "./arch/x86/include/uapi/asm/signal.h" 2
# 128 "./arch/x86/include/uapi/asm/signal.h"
typedef struct sigaltstack {
void *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
# 37 "./arch/x86/include/asm/signal.h" 2
extern void do_signal(struct pt_regs *regs);
# 6 "./include/uapi/linux/signal.h" 2
# 1 "./arch/x86/include/uapi/asm/siginfo.h" 1
# 13 "./arch/x86/include/uapi/asm/siginfo.h"
# 1 "./include/uapi/asm-generic/siginfo.h" 1
typedef union sigval {
int sival_int;
void *sival_ptr;
} sigval_t;
# 32 "./include/uapi/asm-generic/siginfo.h"
union __sifields {
struct {
__kernel_pid_t _pid;
__kernel_uid32_t _uid;
} _kill;
struct {
__kernel_timer_t _tid;
int _overrun;
sigval_t _sigval;
int _sys_private;
} _timer;
struct {
__kernel_pid_t _pid;
__kernel_uid32_t _uid;
sigval_t _sigval;
} _rt;
struct {
__kernel_pid_t _pid;
__kernel_uid32_t _uid;
int _status;
__kernel_clock_t _utime;
__kernel_clock_t _stime;
} _sigchld;
struct {
void *_addr;
# 77 "./include/uapi/asm-generic/siginfo.h"
union {
short _addr_lsb;
struct {
char _dummy_bnd[(__alignof__(void *) < sizeof(short) ? sizeof(short) : __alignof__(void *))];
void *_lower;
void *_upper;
} _addr_bnd;
struct {
char _dummy_pkey[(__alignof__(void *) < sizeof(short) ? sizeof(short) : __alignof__(void *))];
__u32 _pkey;
} _addr_pkey;
};
} _sigfault;
struct {
long _band;
int _fd;
} _sigpoll;
struct {
void *_call_addr;
int _syscall;
unsigned int _arch;
} _sigsys;
};
# 129 "./include/uapi/asm-generic/siginfo.h"
typedef struct siginfo {
union {
struct { int si_signo; int si_errno; int si_code; union __sifields _sifields; };
int _si_pad[128/sizeof(int)];
};
} siginfo_t;
# 320 "./include/uapi/asm-generic/siginfo.h"
typedef struct sigevent {
sigval_t sigev_value;
int sigev_signo;
int sigev_notify;
union {
int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))];
int _tid;
struct {
void (*_function)(sigval_t);
void *_attribute;
} _sigev_thread;
} _sigev_un;
} sigevent_t;
# 14 "./arch/x86/include/uapi/asm/siginfo.h" 2
# 7 "./include/uapi/linux/signal.h" 2
# 11 "./include/linux/signal_types.h" 2
typedef struct kernel_siginfo {
struct { int si_signo; int si_errno; int si_code; union __sifields _sifields; };
} kernel_siginfo_t;
struct sigqueue {
struct list_head list;
int flags;
kernel_siginfo_t info;
struct user_struct *user;
};
struct sigpending {
struct list_head list;
sigset_t signal;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
__sigrestore_t sa_restorer;
sigset_t sa_mask;
};
struct k_sigaction {
struct sigaction sa;
};
# 65 "./include/linux/signal_types.h"
struct ksignal {
struct k_sigaction ka;
kernel_siginfo_t info;
int sig;
};
# 28 "./include/linux/sched.h" 2
# 1 "./include/linux/psi_types.h" 1
# 88 "./include/linux/psi_types.h"
struct psi_group { };
# 29 "./include/linux/sched.h" 2
# 1 "./include/linux/task_io_accounting.h" 1
# 12 "./include/linux/task_io_accounting.h"
struct task_io_accounting {
u64 rchar;
u64 wchar;
u64 syscr;
u64 syscw;
u64 read_bytes;
u64 write_bytes;
# 44 "./include/linux/task_io_accounting.h"
u64 cancelled_write_bytes;
};
# 31 "./include/linux/sched.h" 2
# 1 "./include/uapi/linux/rseq.h" 1
# 16 "./include/uapi/linux/rseq.h"
enum rseq_cpu_id_state {
RSEQ_CPU_ID_UNINITIALIZED = -1,
RSEQ_CPU_ID_REGISTRATION_FAILED = -2,
};
enum rseq_flags {
RSEQ_FLAG_UNREGISTER = (1 << 0),
};
enum rseq_cs_flags_bit {
RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
};
enum rseq_cs_flags {
RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT),
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT),
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT),
};
struct rseq_cs {
__u32 version;
__u32 flags;
__u64 start_ip;
__u64 post_commit_offset;
__u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));
struct rseq {
# 75 "./include/uapi/linux/rseq.h"
__u32 cpu_id_start;
# 90 "./include/uapi/linux/rseq.h"
__u32 cpu_id;
# 109 "./include/uapi/linux/rseq.h"
union {
__u64 ptr64;
__u64 ptr;
# 124 "./include/uapi/linux/rseq.h"
} rseq_cs;
# 144 "./include/uapi/linux/rseq.h"
__u32 flags;
} __attribute__((aligned(4 * sizeof(__u64))));
# 32 "./include/linux/sched.h" 2
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
# 210 "./include/linux/sched.h"
extern void scheduler_tick(void);
extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
void schedule(void);
extern void schedule_preempt_disabled(void);
extern int __attribute__((__warn_unused_result__)) io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
# 236 "./include/linux/sched.h"
struct prev_cputime {
u64 utime;
u64 stime;
raw_spinlock_t lock;
};
# 254 "./include/linux/sched.h"
struct task_cputime {
u64 utime;
u64 stime;
unsigned long long sum_exec_runtime;
};
enum vtime_state {
VTIME_INACTIVE = 0,
VTIME_USER,
VTIME_SYS,
};
struct vtime {
seqcount_t seqcount;
unsigned long long starttime;
enum vtime_state state;
u64 utime;
u64 stime;
u64 gtime;
};
struct sched_info {
unsigned long pcount;
unsigned long long run_delay;
unsigned long long last_arrival;
unsigned long long last_queued;
};
# 314 "./include/linux/sched.h"
struct load_weight {
unsigned long weight;
u32 inv_weight;
};
# 341 "./include/linux/sched.h"
struct util_est {
unsigned int enqueued;
unsigned int ewma;
} __attribute__((__aligned__(sizeof(u64))));
# 399 "./include/linux/sched.h"
struct sched_avg {
u64 last_update_time;
u64 load_sum;
u64 runnable_load_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
unsigned long runnable_load_avg;
unsigned long util_avg;
struct util_est util_est;
} __attribute__((__aligned__((1 << (6)))));
struct sched_statistics {
u64 wait_start;
u64 wait_max;
u64 wait_count;
u64 wait_sum;
u64 iowait_count;
u64 iowait_sum;
u64 sleep_start;
u64 sleep_max;
s64 sum_sleep_runtime;
u64 block_start;
u64 block_max;
u64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
u64 nr_failed_migrations_affine;
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
u64 nr_wakeups;
u64 nr_wakeups_sync;
u64 nr_wakeups_migrate;
u64 nr_wakeups_local;
u64 nr_wakeups_remote;
u64 nr_wakeups_affine;
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
};
struct sched_entity {
struct load_weight load;
unsigned long runnable_weight;
struct rb_node run_node;
struct list_head group_node;
unsigned int on_rq;
u64 exec_start;
u64 sum_exec_runtime;
u64 vruntime;
u64 prev_sum_exec_runtime;
u64 nr_migrations;
struct sched_statistics statistics;
int depth;
struct sched_entity *parent;
struct cfs_rq *cfs_rq;
struct cfs_rq *my_q;
# 480 "./include/linux/sched.h"
struct sched_avg avg;
};
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
unsigned long watchdog_stamp;
unsigned int time_slice;
unsigned short on_rq;
unsigned short on_list;
struct sched_rt_entity *back;
} ;
struct sched_dl_entity {
struct rb_node rb_node;
u64 dl_runtime;
u64 dl_deadline;
u64 dl_period;
u64 dl_bw;
u64 dl_density;
s64 runtime;
u64 deadline;
unsigned int flags;
# 549 "./include/linux/sched.h"
unsigned int dl_throttled : 1;
unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
struct hrtimer dl_timer;
# 568 "./include/linux/sched.h"
struct hrtimer inactive_timer;
};
union rcu_special {
struct {
u8 blocked;
u8 need_qs;
u8 exp_hint;
u8 pad;
} b;
u32 s;
};
enum perf_event_task_context {
perf_invalid_context = -1,
perf_hw_context = 0,
perf_sw_context,
perf_nr_task_contexts,
};
struct wake_q_node {
struct wake_q_node *next;
};
struct task_struct {
struct thread_info thread_info;
volatile long state;
void *stack;
atomic_t usage;
unsigned int flags;
unsigned int ptrace;
struct llist_node wake_entry;
int on_cpu;
unsigned int cpu;
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
# 633 "./include/linux/sched.h"
int recent_used_cpu;
int wake_cpu;
int on_rq;
int prio;
int static_prio;
int normal_prio;
unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
struct task_group *sched_task_group;
struct sched_dl_entity dl;
unsigned int btrace_seq;
unsigned int policy;
int nr_cpus_allowed;
cpumask_t cpus_allowed;
# 679 "./include/linux/sched.h"
struct sched_info sched_info;
struct list_head tasks;
struct plist_node pushable_tasks;
struct rb_node pushable_dl_tasks;
struct mm_struct *mm;
struct mm_struct *active_mm;
struct vmacache vmacache;
struct task_rss_stat rss_stat;
int exit_state;
int exit_code;
int exit_signal;
int pdeath_signal;
unsigned long jobctl;
unsigned int personality;
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
unsigned sched_remote_wakeup:1;
unsigned :0;
unsigned in_execve:1;
unsigned in_iowait:1;
unsigned restore_sigmask:1;
# 735 "./include/linux/sched.h"
unsigned no_cgroup_migration:1;
# 746 "./include/linux/sched.h"
unsigned int kernel_uaccess_faults_ok;
unsigned long atomic_flags;
struct restart_block restart_block;
pid_t pid;
pid_t tgid;
unsigned long stack_canary;
# 766 "./include/linux/sched.h"
struct task_struct *real_parent;
struct task_struct *parent;
struct list_head children;
struct list_head sibling;
struct task_struct *group_leader;
struct list_head ptraced;
struct list_head ptrace_entry;
struct pid *thread_pid;
struct hlist_node pid_links[PIDTYPE_MAX];
struct list_head thread_group;
struct list_head thread_node;
struct completion *vfork_done;
int *set_child_tid;
int *clear_child_tid;
u64 utime;
u64 stime;
u64 gtime;
struct prev_cputime prev_cputime;
# 817 "./include/linux/sched.h"
unsigned long nvcsw;
unsigned long nivcsw;
u64 start_time;
u64 real_start_time;
unsigned long min_flt;
unsigned long maj_flt;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
const struct cred *ptracer_cred;
const struct cred *real_cred;
const struct cred *cred;
# 853 "./include/linux/sched.h"
char comm[16];
struct nameidata *nameidata;
struct sysv_sem sysvsem;
struct sysv_shm sysvshm;
struct fs_struct *fs;
struct files_struct *files;
struct nsproxy *nsproxy;
struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked;
sigset_t real_blocked;
sigset_t saved_sigmask;
struct sigpending pending;
unsigned long sas_ss_sp;
size_t sas_ss_size;
unsigned int sas_ss_flags;
struct callback_head *task_works;
struct audit_context *audit_context;
kuid_t loginuid;
unsigned int sessionid;
struct seccomp seccomp;
u32 parent_exec_id;
u32 self_exec_id;
spinlock_t alloc_lock;
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
struct rb_root_cached pi_waiters;
struct task_struct *pi_top_task;
struct rt_mutex_waiter *pi_blocked_on;
# 950 "./include/linux/sched.h"
void *journal_info;
struct bio_list *bio_list;
struct blk_plug *plug;
struct reclaim_state *reclaim_state;
struct backing_dev_info *backing_dev_info;
struct io_context *io_context;
unsigned long ptrace_message;
kernel_siginfo_t *last_siginfo;
struct task_io_accounting ioac;
u64 acct_rss_mem1;
u64 acct_vm_mem1;
u64 acct_timexpd;
nodemask_t mems_allowed;
seqcount_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
struct css_set *cgroups;
struct list_head cg_list;
struct robust_list_head *robust_list;
struct compat_robust_list_head *compat_robust_list;
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
struct mutex perf_event_mutex;
struct list_head perf_event_list;
struct mempolicy *mempolicy;
short il_prev;
short pref_node_fork;
# 1067 "./include/linux/sched.h"
struct rseq *rseq;
u32 rseq_len;
u32 rseq_sig;
unsigned long rseq_event_mask;
struct tlbflush_unmap_batch tlb_ubc;
struct callback_head rcu;
struct pipe_inode_info *splice_pipe;
struct page_frag task_frag;
struct task_delay_info *delays;
# 1098 "./include/linux/sched.h"
int nr_dirtied;
int nr_dirtied_pause;
unsigned long dirty_paused_when;
# 1111 "./include/linux/sched.h"
u64 timer_slack_ns;
u64 default_timer_slack_ns;
# 1141 "./include/linux/sched.h"
unsigned long trace;
unsigned long trace_recursion;
# 1178 "./include/linux/sched.h"
struct uprobe_task *utask;
# 1187 "./include/linux/sched.h"
int pagefault_disabled;
struct task_struct *oom_reaper_list;
struct vm_struct *stack_vm_area;
atomic_t stack_refcount;
void *security;
# 1218 "./include/linux/sched.h"
struct thread_struct thread;
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pid *task_pid(struct task_struct *task)
{
return task->thread_pid;
}
# 1244 "./include/linux/sched.h"
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_tgid_nr(struct task_struct *tsk)
{
return tsk->tgid;
}
# 1277 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pid_alive(const struct task_struct *p)
{
return p->thread_pid != ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pgrp_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_session_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_tgid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ((void *)0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;
rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(({ typeof(*(tsk->real_parent)) *________p1 = (typeof(*(tsk->real_parent)) *)({ union { typeof((tsk->real_parent)) __val; char __c[1]; } __u; if (1) __read_once_size(&((tsk->real_parent)), __u.__c, sizeof((tsk->real_parent))); else __read_once_size_nocheck(&((tsk->real_parent)), __u.__c, sizeof((tsk->real_parent))); do { } while (0); __u.__val; }); do { } while (0); ; ((typeof(*(tsk->real_parent)) *)(________p1)); }), ns);
rcu_read_unlock();
return pid;
}
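/*
 * The long expression in task_ppid_nr_ns() above is rcu_dereference(tsk->real_parent)
 * after macro expansion: a READ_ONCE() of the pointer (the __read_once_size()
 * union dance) with the sparse/lockdep checks reduced to empty do/while blocks,
 * bracketed by rcu_read_lock()/rcu_read_unlock().
 */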
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pgrp_nr(struct task_struct *tsk)
{
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int task_state_index(struct task_struct *tsk)
{
unsigned int tsk_state = ({ union { typeof(tsk->state) __val; char __c[1]; } __u; if (1) __read_once_size(&(tsk->state), __u.__c, sizeof(tsk->state)); else __read_once_size_nocheck(&(tsk->state), __u.__c, sizeof(tsk->state)); do { } while (0); __u.__val; });
unsigned int state = (tsk_state | tsk->exit_state) & (0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040);
do { extern void __compiletime_assert_1344(void) ; if (!(!(((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) == 0 || ((((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) - 1)) != 0)))) __compiletime_assert_1344(); } while (0);
if (tsk_state == (0x0002 | 0x0400))
state = ((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1);
return fls(state);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";
do { extern void __compiletime_assert_1356(void) ; if (!(!(1 + ( __builtin_constant_p((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) ? ( __builtin_constant_p((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) ? ( ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) < 2 ? 0 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 63) ? 63 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 62) ? 62 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 61) ? 61 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 60) ? 60 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 59) ? 59 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 58) ? 58 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 57) ? 57 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 56) ? 56 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 55) ? 55 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 54) ? 54 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 53) ? 53 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 52) ? 52 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 51) ? 51 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 50) ? 50 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 49) ? 49 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 48) ? 48 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 47) ? 47 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 46) ? 46 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 45) ? 45 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 44) ? 44 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 43) ? 43 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 42) ? 42 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 41) ? 41 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 40) ? 40 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 39) ? 39 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 38) ? 38 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 37) ? 37 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 36) ? 36 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 35) ? 
35 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 34) ? 34 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 33) ? 33 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 32) ? 32 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 31) ? 31 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 30) ? 30 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 29) ? 29 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 28) ? 28 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 27) ? 27 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 26) ? 26 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 25) ? 25 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 24) ? 24 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 23) ? 23 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 22) ? 22 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 21) ? 21 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 20) ? 20 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 19) ? 19 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 18) ? 18 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 17) ? 17 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 16) ? 16 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 15) ? 15 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 14) ? 14 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 13) ? 13 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 12) ? 12 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 11) ? 11 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 10) ? 10 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 9) ? 9 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 8) ? 8 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 7) ? 7 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 6) ? 6 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 5) ? 5 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 4) ? 4 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 3) ? 
3 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) <= 4) ? __ilog2_u32((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) : __ilog2_u64((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) ) != sizeof(state_char) - 1))) __compiletime_assert_1356(); } while (0);
return state_char[state];
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char task_state_to_char(struct task_struct *tsk)
{
return task_index_to_char(task_state_index(tsk));
}
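/*
 * task_state_index() folds tsk->state | tsk->exit_state down to the TASK_REPORT
 * mask (0x7f), special-cases TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD,
 * i.e. 0x0002 | 0x0400) into the extra "idle" slot, and turns the bit into an
 * index with fls().  task_index_to_char() then picks a letter from "RSDTtXZPI",
 * the same letters ps/top report.  The two giant expressions are compile-time
 * BUILD_BUG_ON() checks; the second uses the const_ilog2() ladder to verify the
 * string has exactly one character per reportable state.
 */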
# 1375 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_global_init(struct task_struct *tsk)
{
return task_tgid_nr(tsk) == 1;
}
extern struct pid *cad_pid;
# 1444 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_percpu_thread(void)
{
return (get_current()->flags & 0x04000000) &&
(get_current()->nr_cpus_allowed == 1);
}
# 1475 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_no_new_privs(struct task_struct *p) { return (__builtin_constant_p((0)) ? constant_test_bit((0), (&p->atomic_flags)) : variable_test_bit((0), (&p->atomic_flags))); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_no_new_privs(struct task_struct *p) { set_bit(0, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spread_page(struct task_struct *p) { return (__builtin_constant_p((1)) ? constant_test_bit((1), (&p->atomic_flags)) : variable_test_bit((1), (&p->atomic_flags))); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spread_page(struct task_struct *p) { set_bit(1, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spread_page(struct task_struct *p) { clear_bit(1, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spread_slab(struct task_struct *p) { return (__builtin_constant_p((2)) ? constant_test_bit((2), (&p->atomic_flags)) : variable_test_bit((2), (&p->atomic_flags))); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spread_slab(struct task_struct *p) { set_bit(2, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spread_slab(struct task_struct *p) { clear_bit(2, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ssb_disable(struct task_struct *p) { return (__builtin_constant_p((3)) ? constant_test_bit((3), (&p->atomic_flags)) : variable_test_bit((3), (&p->atomic_flags))); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ssb_disable(struct task_struct *p) { set_bit(3, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spec_ssb_disable(struct task_struct *p) { clear_bit(3, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ssb_force_disable(struct task_struct *p) { return (__builtin_constant_p((4)) ? constant_test_bit((4), (&p->atomic_flags)) : variable_test_bit((4), (&p->atomic_flags))); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ssb_force_disable(struct task_struct *p) { set_bit(4, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ib_disable(struct task_struct *p) { return (__builtin_constant_p((5)) ? constant_test_bit((5), (&p->atomic_flags)) : variable_test_bit((5), (&p->atomic_flags))); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ib_disable(struct task_struct *p) { set_bit(5, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spec_ib_disable(struct task_struct *p) { clear_bit(5, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ib_force_disable(struct task_struct *p) { return (__builtin_constant_p((6)) ? constant_test_bit((6), (&p->atomic_flags)) : variable_test_bit((6), (&p->atomic_flags))); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ib_force_disable(struct task_struct *p) { set_bit(6, &p->atomic_flags); }
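/*
 * The accessors above are the expansion of the TASK_PFA_TEST/SET/CLEAR macros:
 * one test/set/clear helper per PFA_* per-task atomic flag stored in
 * task_struct::atomic_flags (bit 0 PFA_NO_NEW_PRIVS, 1 PFA_SPREAD_PAGE,
 * 2 PFA_SPREAD_SLAB, 3 PFA_SPEC_SSB_DISABLE, 4 PFA_SPEC_SSB_FORCE_DISABLE,
 * 5 PFA_SPEC_IB_DISABLE, 6 PFA_SPEC_IB_FORCE_DISABLE).
 */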
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
get_current()->flags &= ~flags;
get_current()->flags |= orig_flags & flags;
}
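/*
 * current_restore_flags() puts back only the bits named in 'flags' from a saved
 * copy of current->flags.  A minimal usage sketch of the save/set/restore
 * pattern (illustrative only; the PF_* flag chosen here is just an example):
 *
 *     unsigned long pflags = current->flags;
 *     current->flags |= PF_MEMALLOC;
 *     ...                                      // work that must not recurse into reclaim
 *     current_restore_flags(pflags, PF_MEMALLOC);
 */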
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
# 1528 "./include/linux/sched.h"
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int task_nice(const struct task_struct *p)
{
return (((p)->static_prio) - (100 + (19 - -20 + 1) / 2));
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_idle_task(const struct task_struct *p)
{
return !!(p->flags & 0x00000002);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
struct task_struct task;
unsigned long stack[(((1UL) << 12) << (2 + 0))/sizeof(long)];
};
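/*
 * Stack sizing above: THREAD_SIZE expands to PAGE_SIZE << THREAD_SIZE_ORDER,
 * i.e. 4096 << (2 + 0) = 16 KiB on this configuration (the "+ 0" is the KASAN
 * stack order, zero with KASAN disabled), so thread_union::stack and init_stack
 * each hold 16384 / 8 = 2048 unsigned longs.
 */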
extern unsigned long init_stack[(((1UL) << 12) << (2 + 0)) / sizeof(unsigned long)];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct thread_info *task_thread_info(struct task_struct *task)
{
return &task->thread_info;
}
# 1605 "./include/linux/sched.h"
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
extern struct task_struct *find_get_task_by_vpid(pid_t nr);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
extern void kick_process(struct task_struct *tsk);
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_task_comm(struct task_struct *tsk, const char *from)
{
__set_task_comm(tsk, from, false);
}
extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
# 1651 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_tsk_thread_flag(struct task_struct *tsk, int flag,
bool value)
{
update_ti_thread_flag(task_thread_info(tsk), flag, value);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_tsk_need_resched(struct task_struct *tsk)
{
set_tsk_thread_flag(tsk,3);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,3);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_tsk_need_resched(struct task_struct *tsk)
{
return __builtin_expect(!!(test_tsk_thread_flag(tsk,3)), 0);
}
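/*
 * Thread-info flag 3 in the helpers above is TIF_NEED_RESCHED on x86; the
 * __builtin_expect(..., 0) wrappers are the expansion of unlikely().
 */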
# 1704 "./include/linux/sched.h"
extern int _cond_resched(void);
# 1714 "./include/linux/sched.h"
extern int __cond_resched_lock(spinlock_t *lock);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cond_resched_rcu(void)
{
rcu_read_unlock();
({ ___might_sleep("./include/linux/sched.h", 1725, 0); _cond_resched(); });
rcu_read_lock();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int spin_needbreak(spinlock_t *lock)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool need_resched(void)
{
return __builtin_expect(!!(test_ti_thread_flag(((struct thread_info *)get_current()), 3)), 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int task_cpu(const struct task_struct *p)
{
return p->cpu;
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
# 1790 "./include/linux/sched.h"
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
# 1803 "./include/linux/sched.h"
enum rseq_event_mask_bits {
RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};
enum rseq_event_mask {
RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_set_notify_resume(struct task_struct *t)
{
if (t->rseq)
set_tsk_thread_flag(t, 1);
}
void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_handle_notify_resume(struct ksignal *ksig,
struct pt_regs *regs)
{
if (get_current()->rseq)
__rseq_handle_notify_resume(ksig, regs);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
__asm__ __volatile__("" : : : "memory");
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &get_current()->rseq_event_mask);
__asm__ __volatile__("" : : : "memory");
rseq_handle_notify_resume(ksig, regs);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_preempt(struct task_struct *t)
{
__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
rseq_set_notify_resume(t);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_migrate(struct task_struct *t)
{
__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
rseq_set_notify_resume(t);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & 0x00010000) {
t->rseq = ((void *)0);
t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
} else {
t->rseq = get_current()->rseq;
t->rseq_len = get_current()->rseq_len;
t->rseq_sig = get_current()->rseq_sig;
t->rseq_event_mask = get_current()->rseq_event_mask;
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_execve(struct task_struct *t)
{
t->rseq = ((void *)0);
t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
}
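/*
 * Restartable-sequences (rseq) helpers.  Flag 1 in rseq_set_notify_resume() is
 * TIF_NOTIFY_RESUME on x86, and the empty memory-clobber asm statements in
 * rseq_signal_deliver() are what preempt_disable()/preempt_enable() reduce to
 * when preemption counting is compiled out, as here.  In rseq_fork(),
 * 0x00010000 is CLONE_THREAD: new threads start unregistered, while a plain
 * fork inherits the parent's rseq registration.
 */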
# 1908 "./include/linux/sched.h"
void __exit_umh(struct task_struct *tsk);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void exit_umh(struct task_struct *tsk)
{
if (__builtin_expect(!!(tsk->flags & 0x02000000), 0))
__exit_umh(tsk);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_syscall(struct pt_regs *regs)
{
}
# 8 "arch/x86/mm/fault.c" 2
# 1 "./include/linux/sched/task_stack.h" 1
# 10 "./include/linux/sched/task_stack.h"
# 1 "./include/uapi/linux/magic.h" 1
# 11 "./include/linux/sched/task_stack.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *task_stack_page(const struct task_struct *task)
{
return task->stack;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long *end_of_stack(const struct task_struct *task)
{
return task->stack;
}
# 62 "./include/linux/sched/task_stack.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *try_get_task_stack(struct task_struct *tsk)
{
return atomic_add_unless((&tsk->stack_refcount), 1, 0) ?
task_stack_page(tsk) : ((void *)0);
}
extern void put_task_stack(struct task_struct *tsk);
# 81 "./include/linux/sched/task_stack.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(get_current());
return (obj >= stack) && (obj < (stack + (((1UL) << 12) << (2 + 0))));
}
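/*
 * object_is_on_stack() checks whether obj falls inside the current task's
 * [task_stack_page(), +THREAD_SIZE) window (16 KiB here).  try_get_task_stack()
 * pins the stack with atomic_add_unless() on stack_refcount: with thread_info
 * embedded in task_struct the stack can be freed independently of the task
 * (this config also keeps a stack_vm_area pointer for vmapped stacks).
 */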
extern void thread_stack_cache_init(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long stack_not_used(struct task_struct *p)
{
unsigned long *n = end_of_stack(p);
do {
n++;
} while (!*n);
return (unsigned long)n - (unsigned long)end_of_stack(p);
}
extern void set_task_stack_end_magic(struct task_struct *tsk);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kstack_end(void *addr)
{
return !(((unsigned long)addr+sizeof(void*)-1) & ((((1UL) << 12) << (2 + 0))-sizeof(void*)));
}
# 9 "arch/x86/mm/fault.c" 2
# 1 "./include/linux/kdebug.h" 1
# 1 "./arch/x86/include/asm/kdebug.h" 1
struct pt_regs;
enum die_val {
DIE_OOPS = 1,
DIE_INT3,
DIE_DEBUG,
DIE_PANIC,
DIE_NMI,
DIE_DIE,
DIE_KERNELDEBUG,
DIE_TRAP,
DIE_GPF,
DIE_CALL,
DIE_PAGE_FAULT,
DIE_NMIUNKNOWN,
};
enum show_regs_mode {
SHOW_REGS_SHORT,
SHOW_REGS_USER,
SHOW_REGS_ALL
};
extern void die(const char *, struct pt_regs *,long);
extern int __attribute__((__warn_unused_result__)) __die(const char *, struct pt_regs *, long);
extern void show_stack_regs(struct pt_regs *regs);
extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
extern void show_iret_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long, struct pt_regs *, int signr);
# 6 "./include/linux/kdebug.h" 2
struct notifier_block;
struct die_args {
struct pt_regs *regs;
const char *str;
long err;
int trapnr;
int signr;
};
int register_die_notifier(struct notifier_block *nb);
int unregister_die_notifier(struct notifier_block *nb);
int notify_die(enum die_val val, const char *str,
struct pt_regs *regs, long err, int trap, int sig);
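/*
 * Die-notifier interface from <linux/kdebug.h> and <asm/kdebug.h>: subsystems
 * such as kgdb or kprobes can register_die_notifier() to observe events like
 * DIE_OOPS, DIE_GPF or DIE_PAGE_FAULT raised through notify_die().
 */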
# 10 "arch/x86/mm/fault.c" 2
# 1 "./include/linux/extable.h" 1
struct module;
struct exception_table_entry;
const struct exception_table_entry *
search_extable(const struct exception_table_entry *base,
const size_t num,
unsigned long value);
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish);
void sort_main_extable(void);
void trim_init_extable(struct module *m);
const struct exception_table_entry *search_exception_tables(unsigned long add);
const struct exception_table_entry *search_module_extables(unsigned long addr);
# 11 "arch/x86/mm/fault.c" 2
# 1 "./include/linux/memblock.h" 1
# 17 "./include/linux/memblock.h"
# 1 "./include/linux/mm.h" 1
# 10 "./include/linux/mm.h"
# 1 "./include/linux/gfp.h" 1
# 1 "./include/linux/mmzone.h" 1
# 18 "./include/linux/mmzone.h"
# 1 "./include/linux/pageblock-flags.h" 1
# 30 "./include/linux/pageblock-flags.h"
enum pageblock_bits {
PB_migrate,
PB_migrate_end = PB_migrate + 3 - 1,
PB_migrate_skip,
NR_PAGEBLOCK_BITS
};
# 67 "./include/linux/pageblock-flags.h"
struct page;
unsigned long get_pfnblock_flags_mask(struct page *page,
unsigned long pfn,
unsigned long end_bitidx,
unsigned long mask);
void set_pfnblock_flags_mask(struct page *page,
unsigned long flags,
unsigned long pfn,
unsigned long end_bitidx,
unsigned long mask);
# 19 "./include/linux/mmzone.h" 2
# 39 "./include/linux/mmzone.h"
enum migratetype {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
MIGRATE_PCPTYPES,
MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
# 64 "./include/linux/mmzone.h"
MIGRATE_TYPES
};
extern const char * const migratetype_names[MIGRATE_TYPES];
# 78 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_migrate_movable(int mt)
{
return false || mt == MIGRATE_MOVABLE;
}
extern int page_group_by_mobility_disabled;
# 96 "./include/linux/mmzone.h"
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};
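/*
 * One buddy-allocator bucket: a free list per migratetype plus a count of free
 * blocks of this order.  struct zone below keeps free_area[11] of these, i.e.
 * MAX_ORDER = 11, orders 0 through 10 (4 KiB up to 4 MiB blocks).
 */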
struct pglist_data;
# 110 "./include/linux/mmzone.h"
struct zone_padding {
char x[0];
} __attribute__((__aligned__(1 << (6))));
enum numa_stat_item {
NUMA_HIT,
NUMA_MISS,
NUMA_FOREIGN,
NUMA_INTERLEAVE_HIT,
NUMA_LOCAL,
NUMA_OTHER,
NR_VM_NUMA_STAT_ITEMS
};
enum zone_stat_item {
NR_FREE_PAGES,
NR_ZONE_LRU_BASE,
NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
NR_ZONE_ACTIVE_ANON,
NR_ZONE_INACTIVE_FILE,
NR_ZONE_ACTIVE_FILE,
NR_ZONE_UNEVICTABLE,
NR_ZONE_WRITE_PENDING,
NR_MLOCK,
NR_PAGETABLE,
NR_KERNEL_STACK_KB,
NR_BOUNCE,
NR_FREE_CMA_PAGES,
NR_VM_ZONE_STAT_ITEMS };
enum node_stat_item {
NR_LRU_BASE,
NR_INACTIVE_ANON = NR_LRU_BASE,
NR_ACTIVE_ANON,
NR_INACTIVE_FILE,
NR_ACTIVE_FILE,
NR_UNEVICTABLE,
NR_SLAB_RECLAIMABLE,
NR_SLAB_UNRECLAIMABLE,
NR_ISOLATED_ANON,
NR_ISOLATED_FILE,
WORKINGSET_NODES,
WORKINGSET_REFAULT,
WORKINGSET_ACTIVATE,
WORKINGSET_RESTORE,
WORKINGSET_NODERECLAIM,
NR_ANON_MAPPED,
NR_FILE_MAPPED,
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
NR_WRITEBACK_TEMP,
NR_SHMEM,
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
NR_ANON_THPS,
NR_UNSTABLE_NFS,
NR_VMSCAN_WRITE,
NR_VMSCAN_IMMEDIATE,
NR_DIRTIED,
NR_WRITTEN,
NR_KERNEL_MISC_RECLAIMABLE,
NR_VM_NODE_STAT_ITEMS
};
# 202 "./include/linux/mmzone.h"
enum lru_list {
LRU_INACTIVE_ANON = 0,
LRU_ACTIVE_ANON = 0 + 1,
LRU_INACTIVE_FILE = 0 + 2,
LRU_ACTIVE_FILE = 0 + 2 + 1,
LRU_UNEVICTABLE,
NR_LRU_LISTS
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_file_lru(enum lru_list lru)
{
return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_active_lru(enum lru_list lru)
{
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
struct zone_reclaim_stat {
# 234 "./include/linux/mmzone.h"
unsigned long recent_rotated[2];
unsigned long recent_scanned[2];
};
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
atomic_long_t inactive_age;
unsigned long refaults;
};
# 263 "./include/linux/mmzone.h"
typedef unsigned isolate_mode_t;
enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
NR_WMARK
};
struct per_cpu_pages {
int count;
int high;
int batch;
struct list_head lists[MIGRATE_PCPTYPES];
};
struct per_cpu_pageset {
struct per_cpu_pages pcp;
s8 expire;
u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
};
struct per_cpu_nodestat {
s8 stat_threshold;
s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};
enum zone_type {
# 325 "./include/linux/mmzone.h"
ZONE_DMA,
ZONE_DMA32,
ZONE_NORMAL,
# 352 "./include/linux/mmzone.h"
ZONE_MOVABLE,
__MAX_NR_ZONES
};
struct zone {
unsigned long _watermark[NR_WMARK];
unsigned long watermark_boost;
unsigned long nr_reserved_highatomic;
# 380 "./include/linux/mmzone.h"
long lowmem_reserve[4];
int node;
struct pglist_data *zone_pgdat;
struct per_cpu_pageset *pageset;
# 397 "./include/linux/mmzone.h"
unsigned long zone_start_pfn;
# 434 "./include/linux/mmzone.h"
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
const char *name;
# 454 "./include/linux/mmzone.h"
int initialized;
struct zone_padding _pad1_;
struct free_area free_area[11];
unsigned long flags;
spinlock_t lock;
struct zone_padding _pad2_;
unsigned long percpu_drift_mark;
unsigned long compact_cached_free_pfn;
unsigned long compact_cached_migrate_pfn[2];
# 491 "./include/linux/mmzone.h"
unsigned int compact_considered;
unsigned int compact_defer_shift;
int compact_order_failed;
bool compact_blockskip_flush;
bool contiguous;
struct zone_padding _pad3_;
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} __attribute__((__aligned__(1 << (6))));
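/*
 * The zero-length zone_padding members (_pad1_/_pad2_/_pad3_, 64-byte aligned)
 * are the expansion of ZONE_PADDING(): they push the free lists and zone->lock,
 * the compaction state, and the vmstat counters onto separate cache lines so
 * the hot lock does not false-share with read-mostly fields.
 */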
enum pgdat_flags {
PGDAT_CONGESTED,
PGDAT_DIRTY,
PGDAT_WRITEBACK,
PGDAT_RECLAIM_LOCKED,
};
enum zone_flags {
ZONE_BOOSTED_WATERMARK,
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long zone_managed_pages(struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zone_is_initialized(struct zone *zone)
{
return zone->initialized;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zone_is_empty(struct zone *zone)
{
return zone->spanned_pages == 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zone_intersects(struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
return false;
if (start_pfn >= zone_end_pfn(zone) ||
start_pfn + nr_pages <= zone->zone_start_pfn)
return false;
return true;
}
# 580 "./include/linux/mmzone.h"
enum {
ZONELIST_FALLBACK,
ZONELIST_NOFALLBACK,
MAX_ZONELISTS
};
struct zoneref {
struct zone *zone;
int zone_idx;
};
# 615 "./include/linux/mmzone.h"
struct zonelist {
struct zoneref _zonerefs[((1 << 6) * 4) + 1];
};
extern struct page *mem_map;
# 632 "./include/linux/mmzone.h"
struct bootmem_data;
typedef struct pglist_data {
struct zone node_zones[4];
struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones;
# 656 "./include/linux/mmzone.h"
unsigned long node_start_pfn;
unsigned long node_present_pages;
unsigned long node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
unsigned long totalreserve_pages;
unsigned long min_unmapped_pages;
unsigned long min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
# 709 "./include/linux/mmzone.h"
struct lruvec lruvec;
unsigned long flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;
# 731 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *zone_lru_lock(struct zone *zone)
{
return &zone->zone_pgdat->lru_lock;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
return &pgdat->lruvec;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pgdat_is_empty(pg_data_t *pgdat)
{
return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
# 1 "./include/linux/memory_hotplug.h" 1
# 1 "./include/linux/mmzone.h" 1
# 6 "./include/linux/memory_hotplug.h" 2
struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;
# 230 "./include/linux/memory_hotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int zone_span_seqretry(struct zone *zone, unsigned iv)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_span_writelock(struct zone *zone) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_span_writeunlock(struct zone *zone) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_seqlock_init(struct zone *zone) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mhp_notimplemented(const char *func)
{
printk("\001" "4" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
dump_stack();
return -38;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int try_online_node(int nid)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void get_online_mems(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_online_mems(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mem_hotplug_begin(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mem_hotplug_done(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool movable_node_is_enabled(void)
{
return false;
}
# 293 "./include/linux/memory_hotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pgdat_resize_init(struct pglist_data *pgdat) {}
# 307 "./include/linux/memory_hotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_mem_section_removable(unsigned long pfn,
unsigned long nr_pages)
{
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void try_offline_node(int nid) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
return -22;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void remove_memory(int nid, u64 start, u64 size) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __remove_memory(int nid, u64 start, u64 size) {}
extern void __attribute__((__section__(".ref.text"))) __attribute__((__noinline__)) free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern int arch_add_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_one_section(int nid, unsigned long start_pfn,
struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
unsigned long nr_pages);
# 752 "./include/linux/mmzone.h" 2
void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int classzone_idx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);
extern void lruvec_init(struct lruvec *lruvec);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
return ({ void *__mptr = (void *)(lruvec); do { extern void __compiletime_assert_778(void) ; if (!(!(!__builtin_types_compatible_p(typeof(*(lruvec)), typeof(((struct pglist_data *)0)->lruvec)) && !__builtin_types_compatible_p(typeof(*(lruvec)), typeof(void))))) __compiletime_assert_778(); } while (0); ((struct pglist_data *)(__mptr - __builtin_offsetof(struct pglist_data, lruvec))); });
}
extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
void memory_present(int nid, unsigned long start, unsigned long end);
void memblocks_present(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int local_memory_node(int node_id) { return node_id; };
# 813 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_dev_zone(const struct zone *zone)
{
return false;
}
# 825 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool managed_zone(struct zone *zone)
{
return zone_managed_pages(zone);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool populated_zone(struct zone *zone)
{
return zone->present_pages;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int zone_to_nid(struct zone *zone)
{
return zone->node;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_set_nid(struct zone *zone, int nid)
{
zone->node = nid;
}
# 855 "./include/linux/mmzone.h"
extern int movable_zone;
# 868 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_highmem_idx(enum zone_type idx)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_highmem(struct zone *zone)
{
return 0;
}
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[4];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int numa_zonelist_order_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern char numa_zonelist_order[];
# 924 "./include/linux/mmzone.h"
# 1 "./arch/x86/include/asm/mmzone.h" 1
# 1 "./arch/x86/include/asm/mmzone_64.h" 1
# 13 "./arch/x86/include/asm/mmzone_64.h"
extern struct pglist_data *node_data[];
# 6 "./arch/x86/include/asm/mmzone.h" 2
# 925 "./include/linux/mmzone.h" 2
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
# 960 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct zone *zonelist_zone(struct zoneref *zoneref)
{
return zoneref->zone;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int zonelist_zone_idx(struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int zonelist_node_idx(struct zoneref *zoneref)
{
return zone_to_nid(zoneref->zone);
}
struct zoneref *__next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
nodemask_t *nodes);
# 991 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
nodemask_t *nodes)
{
if (__builtin_expect(!!(!nodes && zonelist_zone_idx(z) <= highest_zoneidx), 1))
return z;
return __next_zones_zonelist(z, highest_zoneidx, nodes);
}
# 1016 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
nodemask_t *nodes)
{
return next_zones_zonelist(zonelist->_zonerefs,
highest_zoneidx, nodes);
}
# 1100 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pfn_to_section_nr(unsigned long pfn)
{
return pfn >> (27 - 12);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long section_nr_to_pfn(unsigned long sec)
{
return sec << (27 - 12);
}
struct page;
struct page_ext;
struct mem_section {
# 1127 "./include/linux/mmzone.h"
unsigned long section_mem_map;
unsigned long *pageblock_flags;
# 1143 "./include/linux/mmzone.h"
};
# 1156 "./include/linux/mmzone.h"
extern struct mem_section **mem_section;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct mem_section *__nr_to_section(unsigned long nr)
{
if (!mem_section)
return ((void *)0);
if (!mem_section[((nr) / (((1UL) << 12) / sizeof (struct mem_section)))])
return ((void *)0);
return &mem_section[((nr) / (((1UL) << 12) / sizeof (struct mem_section)))][nr & ((((1UL) << 12) / sizeof (struct mem_section)) - 1)];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);
# 1194 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *__section_mem_map_addr(struct mem_section *section)
{
unsigned long map = section->section_mem_map;
map &= (~((1UL<<3)-1));
return (struct page *)map;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int present_section(struct mem_section *section)
{
return (section && (section->section_mem_map & (1UL<<0)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int present_section_nr(unsigned long nr)
{
return present_section(__nr_to_section(nr));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int valid_section(struct mem_section *section)
{
return (section && (section->section_mem_map & (1UL<<1)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int valid_section_nr(unsigned long nr)
{
return valid_section(__nr_to_section(nr));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int online_section(struct mem_section *section)
{
return (section && (section->section_mem_map & (1UL<<2)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
}
# 1238 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct mem_section *__pfn_to_section(unsigned long pfn)
{
return __nr_to_section(pfn_to_section_nr(pfn));
}
extern int __highest_present_section_nr;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pfn_valid(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= (1UL << ((0 ? 52 : 46) - 27)))
return 0;
return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pfn_present(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= (1UL << ((0 ? 52 : 46) - 27)))
return 0;
return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
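/*
 * SPARSEMEM arithmetic: sections are 2^27 bytes (128 MiB) and pages 2^12 bytes,
 * so pfn >> (27 - 12) selects a section of 32768 pages.  pfn_valid() and
 * pfn_present() first bound the section number by 1 << (MAX_PHYSMEM_BITS - 27),
 * where "(0 ? 52 : 46)" is pgtable_l5_enabled() compiled down to 0 (46 physical
 * address bits without 5-level paging), then test the section_mem_map flag
 * bits: bit 0 marks a present section, bit 1 one with a valid mem_map, bit 2 an
 * online one; the map pointer itself is recovered by masking those bits off.
 */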
# 1277 "./include/linux/mmzone.h"
void sparse_init(void);
# 1288 "./include/linux/mmzone.h"
struct mminit_pfnnid_cache {
unsigned long last_start;
unsigned long last_end;
int last_nid;
};
void memory_present(int nid, unsigned long start, unsigned long end);
# 1336 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool memmap_valid_within(unsigned long pfn,
struct page *page, struct zone *zone)
{
return true;
}
# 7 "./include/linux/gfp.h" 2
# 1 "./include/linux/topology.h" 1
# 45 "./include/linux/topology.h"
int arch_update_cpu_topology(void);
# 67 "./include/linux/topology.h"
extern __attribute__((section(".data..percpu" ""))) __typeof__(int) numa_node;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int numa_node_id(void)
{
return ({ typeof(numa_node) pscr_ret__; do { const void *__vpp_verify = (typeof((&(numa_node)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(numa_node)) { case 1: pscr_ret__ = ({ typeof(numa_node) pfo_ret__; switch (sizeof(numa_node)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (numa_node)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(numa_node) pfo_ret__; switch (sizeof(numa_node)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (numa_node)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(numa_node) pfo_ret__; switch (sizeof(numa_node)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (numa_node)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(numa_node) pfo_ret__; switch (sizeof(numa_node)) { case 1: asm volatile("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (numa_node)); break; case 2: asm volatile("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 4: asm volatile("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 8: asm volatile("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpu_to_node(int cpu)
{
return (*({ do { const void *__vpp_verify = (typeof((&(numa_node)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(numa_node)))) *)((&(numa_node)))); (typeof((typeof(*((&(numa_node)))) *)((&(numa_node))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_numa_node(int node)
{
do { do { const void *__vpp_verify = (typeof((&(numa_node)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(numa_node)) { case 1: do { typedef typeof((numa_node)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (node); (void)pto_tmp__; } switch (sizeof((numa_node))) { case 1: asm("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "qi" ((pto_T__)(node))); break; case 2: asm("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 4: asm("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 8: asm("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "re" ((pto_T__)(node))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((numa_node)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (node); (void)pto_tmp__; } switch (sizeof((numa_node))) { case 1: asm("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "qi" ((pto_T__)(node))); break; case 2: asm("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 4: asm("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 8: asm("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "re" ((pto_T__)(node))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((numa_node)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (node); (void)pto_tmp__; } switch (sizeof((numa_node))) { case 1: asm("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "qi" ((pto_T__)(node))); break; case 2: asm("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 4: asm("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 8: asm("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "re" ((pto_T__)(node))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((numa_node)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (node); (void)pto_tmp__; } switch (sizeof((numa_node))) { case 1: asm("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "qi" ((pto_T__)(node))); break; case 2: asm("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 4: asm("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 8: asm("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "re" ((pto_T__)(node))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_cpu_numa_node(int cpu, int node)
{
(*({ do { const void *__vpp_verify = (typeof((&(numa_node)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(numa_node)))) *)((&(numa_node)))); (typeof((typeof(*((&(numa_node)))) *)((&(numa_node))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })) = node;
}
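/*
 * The function bodies above are the fully expanded x86 per-cpu accessors:
 * numa_node_id() is a this-cpu read of the per-cpu "numa_node" variable,
 * where the switch on sizeof() selects a movb/movw/movl/movq that reads
 * through the %gs segment base, while cpu_to_node()/set_cpu_numa_node()
 * address the remote CPU's copy by adding __per_cpu_offset[cpu] to the
 * variable's address.  In the unexpanded headers each of these is a one-line
 * helper, roughly (reconstruction; the exact helper name may differ):
 *
 *     static inline int numa_node_id(void)
 *     {
 *             return __this_cpu_read(numa_node);
 *     }
 */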
# 162 "./include/linux/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int numa_mem_id(void)
{
return numa_node_id();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int node_to_mem_node(int node)
{
return node;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpu_to_mem(int cpu)
{
return cpu_to_node(cpu);
}
# 198 "./include/linux/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cpumask *cpu_smt_mask(int cpu)
{
return ((*({ do { const void *__vpp_verify = (typeof((&(cpu_sibling_map)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(cpu_sibling_map)))) *)((&(cpu_sibling_map)))); (typeof((typeof(*((&(cpu_sibling_map)))) *)((&(cpu_sibling_map))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cpumask *cpu_cpu_mask(int cpu)
{
return cpumask_of_node(cpu_to_node(cpu));
}
# 10 "./include/linux/gfp.h" 2
struct vm_area_struct;
# 308 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
((void)(sizeof(( long)((gfp_flags & ((( gfp_t)0x10u)|(( gfp_t)0x08u))) == ((( gfp_t)0x10u)|(( gfp_t)0x08u))))));
do { extern void __compiletime_assert_311(void) ; if (!(!((1UL << 3) != 0x08u))) __compiletime_assert_311(); } while (0);
do { extern void __compiletime_assert_312(void) ; if (!(!((0x08u >> 3) != MIGRATE_MOVABLE))) __compiletime_assert_312(); } while (0);
if (__builtin_expect(!!(page_group_by_mobility_disabled), 0))
return MIGRATE_UNMOVABLE;
return (gfp_flags & ((( gfp_t)0x10u)|(( gfp_t)0x08u))) >> 3;
}
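/*
 * gfpflags_to_migratetype() works because the migrate type is encoded
 * directly in the GFP bits: 0x08 (movable) and 0x10 (reclaimable), shifted
 * down by 3.  That is exactly what the two __compiletime_assert checks above
 * pin down ((1UL << 3) == 0x08 and 0x08 >> 3 == MIGRATE_MOVABLE).  If page
 * grouping by mobility is disabled at boot, every allocation is treated as
 * MIGRATE_UNMOVABLE instead.
 */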
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return !!(gfp_flags & (( gfp_t)0x200000u));
}
# 418 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum zone_type gfp_zone(gfp_t flags)
{
enum zone_type z;
int bit = ( int) (flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u)));
z = (( (ZONE_NORMAL << 0 * 2) | (ZONE_DMA << 0x01u * 2) | (ZONE_NORMAL << 0x02u * 2) | (ZONE_DMA32 << 0x04u * 2) | (ZONE_NORMAL << 0x08u * 2) | (ZONE_DMA << (0x08u | 0x01u) * 2) | (ZONE_MOVABLE << (0x08u | 0x02u) * 2) | (ZONE_DMA32 << (0x08u | 0x04u) * 2)) >> (bit * 2)) &
((1 << 2) - 1);
((void)(sizeof(( long)((( 1 << (0x01u | 0x02u) | 1 << (0x01u | 0x04u) | 1 << (0x04u | 0x02u) | 1 << (0x01u | 0x04u | 0x02u) | 1 << (0x08u | 0x02u | 0x01u) | 1 << (0x08u | 0x04u | 0x01u) | 1 << (0x08u | 0x04u | 0x02u) | 1 << (0x08u | 0x04u | 0x01u | 0x02u) ) >> bit) & 1))));
return z;
}
# 436 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int gfp_zonelist(gfp_t flags)
{
if (__builtin_expect(!!(flags & (( gfp_t)0x40000u)), 0))
return ZONELIST_NOFALLBACK;
return ZONELIST_FALLBACK;
}
# 454 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct zonelist *node_zonelist(int nid, gfp_t flags)
{
return (node_data[nid])->node_zonelists + gfp_zonelist(flags);
}
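/*
 * gfp_zone() above is the expanded GFP_ZONE_TABLE trick: each combination of
 * the zone selector bits (0x01 DMA, 0x02 HIGHMEM, 0x04 DMA32, 0x08 MOVABLE)
 * maps to a zone number packed two bits at a time into a single integer
 * constant, and the second packed constant flags the combinations that are
 * invalid.  gfp_zonelist() selects ZONELIST_NOFALLBACK only when
 * __GFP_THISNODE (0x40000 here) is set, and node_zonelist() indexes that
 * node's node_zonelists[] with the result.
 */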
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_free_page(struct page *page, int order) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_alloc_page(struct page *page, int order) { }
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
{
return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, ((void *)0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
((void)(sizeof(( long)(nid < 0 || nid >= (1 << 6)))));
((void)(sizeof(( long)((gfp_mask & (( gfp_t)0x40000u)) && !node_state((nid), N_ONLINE)))));
return __alloc_pages(gfp_mask, order, nid);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
if (nid == (-1))
nid = numa_mem_id();
return __alloc_pages_node(nid, gfp_mask, order);
}
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
int node, bool hugepage);
# 530 "./include/linux/gfp.h"
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_unref_page(struct page *page);
extern void free_unref_page_list(struct list_head *list);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask);
extern void page_frag_free(void *addr);
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
void page_alloc_init_late(void);
# 571 "./include/linux/gfp.h"
extern gfp_t gfp_allowed_mask;
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);
extern bool pm_suspended_storage(void);
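/*
 * The declarations above are the core page allocator entry points.  A
 * minimal usage sketch, not part of this translation unit (GFP_KERNEL is the
 * usual allocation mask from the unexpanded headers, and page_address() is
 * declared later in mm.h):
 *
 *     struct page *page = alloc_pages(GFP_KERNEL, 0);    // one order-0 page
 *     if (page) {
 *             void *va = page_address(page);
 *             ...
 *             __free_pages(page, 0);
 *     }
 *
 *     unsigned long addr = get_zeroed_page(GFP_KERNEL);  // zeroed page, as VA
 *     if (addr)
 *             free_pages(addr, 0);
 *
 * An order of n requests 2^n physically contiguous pages.
 */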
# 11 "./include/linux/mm.h" 2
# 20 "./include/linux/mm.h"
# 1 "./include/linux/percpu-refcount.h" 1
# 59 "./include/linux/percpu-refcount.h"
struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);
enum {
__PERCPU_REF_ATOMIC = 1LU << 0,
__PERCPU_REF_DEAD = 1LU << 1,
__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
__PERCPU_REF_FLAG_BITS = 2,
};
enum {
PERCPU_REF_INIT_ATOMIC = 1 << 0,
PERCPU_REF_INIT_DEAD = 1 << 1,
};
struct percpu_ref {
atomic_long_t count;
unsigned long percpu_count_ptr;
percpu_ref_func_t *release;
percpu_ref_func_t *confirm_switch;
bool force_atomic:1;
struct callback_head rcu;
};
int __attribute__((__warn_unused_result__)) percpu_ref_init(struct percpu_ref *ref,
percpu_ref_func_t *release, unsigned int flags,
gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
# 126 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_ref_kill(struct percpu_ref *ref)
{
percpu_ref_kill_and_confirm(ref, ((void *)0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __ref_is_percpu(struct percpu_ref *ref,
unsigned long **percpu_countp)
{
unsigned long percpu_ptr;
# 153 "./include/linux/percpu-refcount.h"
percpu_ptr = ({ union { typeof(ref->percpu_count_ptr) __val; char __c[1]; } __u; if (1) __read_once_size(&(ref->percpu_count_ptr), __u.__c, sizeof(ref->percpu_count_ptr)); else __read_once_size_nocheck(&(ref->percpu_count_ptr), __u.__c, sizeof(ref->percpu_count_ptr)); do { } while (0); __u.__val; });
if (__builtin_expect(!!(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD), 0))
return false;
*percpu_countp = (unsigned long *)percpu_ptr;
return true;
}
# 177 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long *percpu_count;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &percpu_count))
do { do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*percpu_count)) { case 1: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(nr) && ((nr) == 1 || (nr) == -1)) ? (int)(nr) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (nr); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(nr))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(nr))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(nr))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(nr))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(nr) && ((nr) == 1 || (nr) == -1)) ? (int)(nr) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (nr); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(nr))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(nr))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(nr))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(nr))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(nr) && ((nr) == 1 || (nr) == -1)) ? 
(int)(nr) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (nr); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(nr))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(nr))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(nr))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(nr))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(nr) && ((nr) == 1 || (nr) == -1)) ? (int)(nr) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (nr); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(nr))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(nr))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(nr))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(nr))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
else
atomic_long_add(nr, &ref->count);
rcu_read_unlock_sched();
}
# 199 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_ref_get(struct percpu_ref *ref)
{
percpu_ref_get_many(ref, 1);
}
# 213 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool percpu_ref_tryget(struct percpu_ref *ref)
{
unsigned long *percpu_count;
bool ret;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &percpu_count)) {
do { do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*percpu_count)) { case 1: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
ret = true;
} else {
ret = atomic64_add_unless(((atomic64_t *)(&ref->count)), 1, 0);
}
rcu_read_unlock_sched();
return ret;
}
# 247 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
unsigned long *percpu_count;
bool ret = false;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &percpu_count)) {
do { do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*percpu_count)) { case 1: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
ret = true;
} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
ret = atomic64_add_unless(((atomic64_t *)(&ref->count)), 1, 0);
}
rcu_read_unlock_sched();
return ret;
}
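/*
 * The difference between the two tryget variants above is the DEAD check:
 * percpu_ref_tryget() succeeds as long as the count is non-zero, even after
 * percpu_ref_kill(), while percpu_ref_tryget_live() additionally refuses
 * once __PERCPU_REF_DEAD is set (callers that race with kill are expected to
 * synchronize through the RCU-sched read-side section used here).
 */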
# 276 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long *percpu_count;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &percpu_count))
do { do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*percpu_count)) { case 1: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*percpu_count))(nr)) && ((-(typeof(*percpu_count))(nr)) == 1 || (-(typeof(*percpu_count))(nr)) == -1)) ? (int)(-(typeof(*percpu_count))(nr)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*percpu_count))(nr)); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*percpu_count))(nr)) && ((-(typeof(*percpu_count))(nr)) == 1 || (-(typeof(*percpu_count))(nr)) == -1)) ? (int)(-(typeof(*percpu_count))(nr)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*percpu_count))(nr)); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*percpu_count))(nr)) && ((-(typeof(*percpu_count))(nr)) == 1 || (-(typeof(*percpu_count))(nr)) == -1)) ? 
(int)(-(typeof(*percpu_count))(nr)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*percpu_count))(nr)); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((*percpu_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*percpu_count))(nr)) && ((-(typeof(*percpu_count))(nr)) == 1 || (-(typeof(*percpu_count))(nr)) == -1)) ? (int)(-(typeof(*percpu_count))(nr)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*percpu_count))(nr)); (void)pao_tmp__; } switch (sizeof((*percpu_count))) { case 1: if (pao_ID__ == 1) asm("incb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decb ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "qi" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 2: if (pao_ID__ == 1) asm("incw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decw ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 4: if (pao_ID__ == 1) asm("incl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decl ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "ri" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; case 8: if (pao_ID__ == 1) asm("incq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else if (pao_ID__ == -1) asm("decq ""%%""gs"":" "%" "0" : "+m" ((*percpu_count))); else asm("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*percpu_count)) : "re" ((pao_T__)(-(typeof(*percpu_count))(nr)))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
else if (__builtin_expect(!!(atomic_long_sub_and_test(nr, &ref->count)), 0))
ref->release(ref);
rcu_read_unlock_sched();
}
# 299 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_ref_put(struct percpu_ref *ref)
{
percpu_ref_put_many(ref, 1);
}
# 313 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool percpu_ref_is_dying(struct percpu_ref *ref)
{
return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
# 326 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool percpu_ref_is_zero(struct percpu_ref *ref)
{
unsigned long *percpu_count;
if (__ref_is_percpu(ref, &percpu_count))
return false;
return !atomic_long_read(&ref->count);
}
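/*
 * A minimal percpu_ref lifecycle sketch built on the API declared above;
 * the object and callback names are illustrative only:
 *
 *     static void my_release(struct percpu_ref *ref)
 *     {
 *             // last reference gone: free the object containing ref
 *     }
 *
 *     percpu_ref_init(&obj->ref, my_release, 0, GFP_KERNEL);
 *     percpu_ref_get(&obj->ref);      // fast path: %gs-relative increment
 *     percpu_ref_put(&obj->ref);      // may call my_release() in atomic mode
 *     percpu_ref_kill(&obj->ref);     // switch to atomic mode, drop init ref
 *     percpu_ref_exit(&obj->ref);     // after my_release() has run
 *
 * While the ref is in per-cpu mode, get/put are the gs-prefixed inc/dec/add
 * instructions expanded above; once __PERCPU_REF_ATOMIC_DEAD is set they fall
 * back to the shared atomic_long_t count.
 */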
# 21 "./include/linux/mm.h" 2
# 1 "./include/linux/bit_spinlock.h" 1
# 16 "./include/linux/bit_spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bit_spin_lock(int bitnum, unsigned long *addr)
{
__asm__ __volatile__("" : : : "memory");
while (__builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 0)) {
__asm__ __volatile__("" : : : "memory");
do {
cpu_relax();
} while ((__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr))));
__asm__ __volatile__("" : : : "memory");
}
(void)0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bit_spin_trylock(int bitnum, unsigned long *addr)
{
__asm__ __volatile__("" : : : "memory");
if (__builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 0)) {
__asm__ __volatile__("" : : : "memory");
return 0;
}
(void)0;
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bit_spin_unlock(int bitnum, unsigned long *addr)
{
clear_bit_unlock(bitnum, addr);
__asm__ __volatile__("" : : : "memory");
(void)0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
__clear_bit_unlock(bitnum, addr);
__asm__ __volatile__("" : : : "memory");
(void)0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
return (__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)));
}
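/*
 * bit_spin_lock()/bit_spin_unlock() implement a spinlock stored in a single
 * bit of an arbitrary word: acquisition is test_and_set_bit_lock() in a loop
 * that spins with cpu_relax() while the bit reads as set, and release is
 * clear_bit_unlock().  The bare asm volatile("" ::: "memory") statements and
 * the (void)0 placeholders appear to be what barrier(), the preempt
 * enable/disable calls and __acquire()/__release() compile down to in this
 * configuration (no preempt counting, no sparse annotations).
 */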
# 22 "./include/linux/mm.h" 2
# 1 "./include/linux/shrinker.h" 1
# 12 "./include/linux/shrinker.h"
struct shrink_control {
gfp_t gfp_mask;
int nid;
unsigned long nr_to_scan;
unsigned long nr_scanned;
struct mem_cgroup *memcg;
};
# 60 "./include/linux/shrinker.h"
struct shrinker {
unsigned long (*count_objects)(struct shrinker *,
struct shrink_control *sc);
unsigned long (*scan_objects)(struct shrinker *,
struct shrink_control *sc);
long batch;
int seeks;
unsigned flags;
struct list_head list;
atomic_long_t *nr_deferred;
};
extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
extern int register_shrinker(struct shrinker *shrinker);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
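/*
 * struct shrinker is the memory-reclaim callback interface: count_objects()
 * reports how many objects could be freed for the given shrink_control (gfp
 * mask, NUMA node, scan budget), and scan_objects() frees up to
 * sc->nr_to_scan of them and returns how many it reclaimed (or a sentinel to
 * stop).  A shrinker becomes visible to reclaim via register_shrinker(), or
 * prealloc_shrinker() followed by register_shrinker_prepared(), and is torn
 * down with unregister_shrinker().
 */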
# 23 "./include/linux/mm.h" 2
# 1 "./include/linux/page_ext.h" 1
# 1 "./include/linux/stacktrace.h" 1
struct task_struct;
struct pt_regs;
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
int skip;
};
extern void save_stack_trace(struct stack_trace *trace);
extern void save_stack_trace_regs(struct pt_regs *regs,
struct stack_trace *trace);
extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
extern int snprint_stack_trace(char *buf, size_t size,
struct stack_trace *trace, int spaces);
extern void save_stack_trace_user(struct stack_trace *trace);
# 7 "./include/linux/page_ext.h" 2
# 1 "./include/linux/stackdepot.h" 1
# 24 "./include/linux/stackdepot.h"
typedef u32 depot_stack_handle_t;
struct stack_trace;
depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
# 8 "./include/linux/page_ext.h" 2
struct pglist_data;
struct page_ext_operations {
size_t offset;
size_t size;
bool (*need)(void);
void (*init)(void);
};
# 56 "./include/linux/page_ext.h"
struct page_ext;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page_ext *lookup_page_ext(const struct page *page)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_ext_init(void)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_ext_init_flatmem(void)
{
}
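/*
 * The empty pgdat_page_ext_init()/lookup_page_ext()/page_ext_init()/
 * page_ext_init_flatmem() bodies above are the stub variants of the page_ext
 * interface; lookup_page_ext() simply returns NULL, which suggests this
 * configuration was built without any page-extension consumers (page owner
 * tracking and similar debugging features).
 */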
# 25 "./include/linux/mm.h" 2
# 1 "./include/linux/page_ref.h" 1
# 1 "./include/linux/page-flags.h" 1
# 70 "./include/linux/page-flags.h"
enum pageflags {
PG_locked,
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
PG_active,
PG_workingset,
PG_waiters,
PG_error,
PG_slab,
PG_owner_priv_1,
PG_arch_1,
PG_reserved,
PG_private,
PG_private_2,
PG_writeback,
PG_head,
PG_mappedtodisk,
PG_reclaim,
PG_swapbacked,
PG_unevictable,
PG_mlocked,
PG_uncached,
# 105 "./include/linux/page-flags.h"
__NR_PAGEFLAGS,
PG_checked = PG_owner_priv_1,
PG_swapcache = PG_owner_priv_1,
PG_fscache = PG_private_2,
PG_pinned = PG_owner_priv_1,
PG_savepinned = PG_dirty,
PG_foreign = PG_owner_priv_1,
PG_slob_free = PG_private,
PG_double_map = PG_private_2,
PG_isolated = PG_reclaim,
};
struct page;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *compound_head(struct page *page)
{
unsigned long head = ({ union { typeof(page->compound_head) __val; char __c[1]; } __u; if (1) __read_once_size(&(page->compound_head), __u.__c, sizeof(page->compound_head)); else __read_once_size_nocheck(&(page->compound_head), __u.__c, sizeof(page->compound_head)); do { } while (0); __u.__val; });
if (__builtin_expect(!!(head & 1), 0))
return (struct page *) (head - 1);
return page;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageTail(struct page *page)
{
return ({ union { typeof(page->compound_head) __val; char __c[1]; } __u; if (1) __read_once_size(&(page->compound_head), __u.__c, sizeof(page->compound_head)); else __read_once_size_nocheck(&(page->compound_head), __u.__c, sizeof(page->compound_head)); do { } while (0); __u.__val; }) & 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageCompound(struct page *page)
{
return (__builtin_constant_p((PG_head)) ? constant_test_bit((PG_head), (&page->flags)) : variable_test_bit((PG_head), (&page->flags))) || PageTail(page);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PagePoisoned(const struct page *page)
{
return page->flags == -1l;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_init_poison(struct page *page, size_t size)
{
}
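/*
 * compound_head() decodes the tail-page encoding used by the page allocator:
 * a tail page stores the head page's address with bit 0 set in
 * page->compound_head, so the "& 1" test and the "- 1" above recover the
 * head page, and PagePoisoned() catches struct pages whose flags still hold
 * the all-ones poison pattern.  The very long one-line functions that follow
 * are the PAGEFLAG()/TESTPAGEFLAG()/__SETPAGEFLAG() macro family expanded:
 * each generates PageFoo()/SetPageFoo()/ClearPageFoo() accessors around
 * test_bit()/set_bit()/clear_bit() on page->flags, with the per-flag policy
 * deciding whether the operation is applied to the head of a compound page.
 */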
# 281 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageLocked(struct page *page) { return (__builtin_constant_p((PG_locked)) ? constant_test_bit((PG_locked), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags)) : variable_test_bit((PG_locked), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageLocked(struct page *page) { __set_bit(PG_locked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageLocked(struct page *page) { __clear_bit(PG_locked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageWaiters(struct page *page) { return (__builtin_constant_p((PG_waiters)) ? constant_test_bit((PG_waiters), (&({ ((void)(sizeof(( long)(PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_waiters), (&({ ((void)(sizeof(( long)(PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageWaiters(struct page *page) { set_bit(PG_waiters, &({ ((void)(sizeof(( long)(PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageWaiters(struct page *page) { clear_bit(PG_waiters, &({ ((void)(sizeof(( long)(PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageWaiters(struct page *page) { __clear_bit(PG_waiters, &({ ((void)(sizeof(( long)(PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageError(struct page *page) { return (__builtin_constant_p((PG_error)) ? constant_test_bit((PG_error), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_error), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageError(struct page *page) { set_bit(PG_error, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageError(struct page *page) { clear_bit(PG_error, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageError(struct page *page) { return test_and_clear_bit(PG_error, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageReferenced(struct page *page) { return (__builtin_constant_p((PG_referenced)) ? constant_test_bit((PG_referenced), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags)) : variable_test_bit((PG_referenced), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageReferenced(struct page *page) { __set_bit(PG_referenced, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageDirty(struct page *page) { return (__builtin_constant_p((PG_dirty)) ? constant_test_bit((PG_dirty), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags)) : variable_test_bit((PG_dirty), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageLRU(struct page *page) { return (__builtin_constant_p((PG_lru)) ? constant_test_bit((PG_lru), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags)) : variable_test_bit((PG_lru), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageActive(struct page *page) { return (__builtin_constant_p((PG_active)) ? constant_test_bit((PG_active), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags)) : variable_test_bit((PG_active), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageActive(struct page *page) { set_bit(PG_active, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageWorkingset(struct page *page) { return (__builtin_constant_p((PG_workingset)) ? constant_test_bit((PG_workingset), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags)) : variable_test_bit((PG_workingset), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageWorkingset(struct page *page) { set_bit(PG_workingset, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageWorkingset(struct page *page) { clear_bit(PG_workingset, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageWorkingset(struct page *page) { return test_and_clear_bit(PG_workingset, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageSlab(struct page *page) { return (__builtin_constant_p((PG_slab)) ? constant_test_bit((PG_slab), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags)) : variable_test_bit((PG_slab), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageSlobFree(struct page *page) { return (__builtin_constant_p((PG_slob_free)) ? constant_test_bit((PG_slob_free), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags)) : variable_test_bit((PG_slob_free), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageChecked(struct page *page) { return (__builtin_constant_p((PG_checked)) ? constant_test_bit((PG_checked), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_checked), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PagePinned(struct page *page) { return (__builtin_constant_p((PG_pinned)) ? constant_test_bit((PG_pinned), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_pinned), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageSavePinned(struct page *page) { return (__builtin_constant_p((PG_savepinned)) ? constant_test_bit((PG_savepinned), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_savepinned), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); };
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageForeign(struct page *page) { return (__builtin_constant_p((PG_foreign)) ? constant_test_bit((PG_foreign), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_foreign), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageForeign(struct page *page) { set_bit(PG_foreign, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageForeign(struct page *page) { clear_bit(PG_foreign, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); };
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageReserved(struct page *page) { return (__builtin_constant_p((PG_reserved)) ? constant_test_bit((PG_reserved), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_reserved), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageReserved(struct page *page) { __set_bit(PG_reserved, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageSwapBacked(struct page *page) { return (__builtin_constant_p((PG_swapbacked)) ? constant_test_bit((PG_swapbacked), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags)) : variable_test_bit((PG_swapbacked), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageSwapBacked(struct page *page) { __set_bit(PG_swapbacked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PagePrivate(struct page *page) { return (__builtin_constant_p((PG_private)) ? constant_test_bit((PG_private), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags)) : variable_test_bit((PG_private), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PagePrivate2(struct page *page) { return (__builtin_constant_p((PG_private_2)) ? constant_test_bit((PG_private_2), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags)) : variable_test_bit((PG_private_2), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPagePrivate2(struct page *page) { set_bit(PG_private_2, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPagePrivate2(struct page *page) { clear_bit(PG_private_2, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestSetPagePrivate2(struct page *page) { return test_and_set_bit(PG_private_2, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPagePrivate2(struct page *page) { return test_and_clear_bit(PG_private_2, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageOwnerPriv1(struct page *page) { return (__builtin_constant_p((PG_owner_priv_1)) ? constant_test_bit((PG_owner_priv_1), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags)) : variable_test_bit((PG_owner_priv_1), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageOwnerPriv1(struct page *page) { set_bit(PG_owner_priv_1, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageOwnerPriv1(struct page *page) { clear_bit(PG_owner_priv_1, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageOwnerPriv1(struct page *page) { return test_and_clear_bit(PG_owner_priv_1, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageWriteback(struct page *page) { return (__builtin_constant_p((PG_writeback)) ? constant_test_bit((PG_writeback), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags)) : variable_test_bit((PG_writeback), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags))); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageMappedToDisk(struct page *page) { return (__builtin_constant_p((PG_mappedtodisk)) ? constant_test_bit((PG_mappedtodisk), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags)) : variable_test_bit((PG_mappedtodisk), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageReclaim(struct page *page) { return (__builtin_constant_p((PG_reclaim)) ? constant_test_bit((PG_reclaim), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags)) : variable_test_bit((PG_reclaim), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageReadahead(struct page *page) { return (__builtin_constant_p((PG_reclaim)) ? constant_test_bit((PG_reclaim), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_reclaim), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageReadahead(struct page *page) { set_bit(PG_reclaim, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageReadahead(struct page *page) { clear_bit(PG_reclaim, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageReadahead(struct page *page) { return test_and_clear_bit(PG_reclaim, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
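/*
 * The one-line definitions above are the preprocessor's expansion of the
 * PAGEFLAG()/TESTPAGEFLAG()/__PAGEFLAG()/TESTCLEARFLAG() macro families from
 * include/linux/page-flags.h: each emits Page*/SetPage*/ClearPage* (plus the
 * __*/TestSet*/TestClear* variants) accessors over one bit of page->flags.
 * The differing "({ ... compound_head(page) ... })" wrappers are the page
 * policy helpers: PF_HEAD always redirects to the compound head, PF_NO_TAIL
 * also redirects but asserts writers are not handed a tail page,
 * PF_NO_COMPOUND asserts the page is not compound at all, and PF_ANY uses the
 * page as given.  A rough, hedged sketch of the unexpanded source, assuming
 * the macro spellings used by this kernel version:
 *
 *   PAGEFLAG(Active, active, PF_HEAD) TESTCLEARFLAG(Active, active, PF_HEAD)
 *   __PAGEFLAG(Slab, slab, PF_NO_TAIL)
 *   PAGEFLAG(Checked, checked, PF_NO_COMPOUND)
 *   PAGEFLAG(Private, private, PF_ANY)
 */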
# 343 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageHighMem(const struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void SetPageHighMem(struct page *page) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ClearPageHighMem(struct page *page) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageSwapCache(struct page *page)
{
return PageSwapBacked(page) && (__builtin_constant_p((PG_swapcache)) ? constant_test_bit((PG_swapcache), (&page->flags)) : variable_test_bit((PG_swapcache), (&page->flags)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageSwapCache(struct page *page) { set_bit(PG_swapcache, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageSwapCache(struct page *page) { clear_bit(PG_swapcache, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageUnevictable(struct page *page) { return (__builtin_constant_p((PG_unevictable)) ? constant_test_bit((PG_unevictable), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags)) : variable_test_bit((PG_unevictable), (&({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageUnevictable(struct page *page) { set_bit(PG_unevictable, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageUnevictable(struct page *page) { clear_bit(PG_unevictable, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageUnevictable(struct page *page) { __clear_bit(PG_unevictable, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageUnevictable(struct page *page) { return test_and_clear_bit(PG_unevictable, &({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageMlocked(struct page *page) { return (__builtin_constant_p((PG_mlocked)) ? constant_test_bit((PG_mlocked), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags)) : variable_test_bit((PG_mlocked), (&({ ((void)(sizeof(( long)(0 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageMlocked(struct page *page) { set_bit(PG_mlocked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageMlocked(struct page *page) { clear_bit(PG_mlocked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageMlocked(struct page *page) { __clear_bit(PG_mlocked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestSetPageMlocked(struct page *page) { return test_and_set_bit(PG_mlocked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageMlocked(struct page *page) { return test_and_clear_bit(PG_mlocked, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageUncached(struct page *page) { return (__builtin_constant_p((PG_uncached)) ? constant_test_bit((PG_uncached), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags)) : variable_test_bit((PG_uncached), (&({ ((void)(sizeof(( long)(0 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageUncached(struct page *page) { set_bit(PG_uncached, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageUncached(struct page *page) { clear_bit(PG_uncached, &({ ((void)(sizeof(( long)(1 && PageCompound(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; }); })->flags); }
# 386 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageHWPoison(const struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void SetPageHWPoison(struct page *page) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ClearPageHWPoison(struct page *page) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool set_hwpoison_free_buddy_page(struct page *page)
{
return 0;
}
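/*
 * PageHighMem() and the PageHWPoison()/set_hwpoison_free_buddy_page() group
 * above expand to constant stubs: x86_64 has no highmem and this build does
 * not enable CONFIG_MEMORY_FAILURE, so the *_FALSE()/no-op helper macros in
 * page-flags.h emit inlines that return 0 or do nothing, letting callers
 * compile away.
 */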
# 423 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageMappingFlags(struct page *page)
{
return ((unsigned long)page->mapping & (0x1 | 0x2)) != 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageAnon(struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & 0x1) != 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int __PageMovable(struct page *page)
{
return ((unsigned long)page->mapping & (0x1 | 0x2)) ==
0x2;
}
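/*
 * PageMappingFlags()/PageAnon()/__PageMovable() decode the low bits of
 * page->mapping rather than a bit in page->flags.  The literal 0x1 is
 * PAGE_MAPPING_ANON and 0x2 is PAGE_MAPPING_MOVABLE; both bits set together
 * mark a KSM page.  Summarising the encoding:
 *
 *   (mapping & 0x3) == 0x0   page-cache page, mapping is an address_space *
 *   (mapping & 0x3) == 0x1   anonymous page, mapping points at an anon_vma
 *   (mapping & 0x3) == 0x2   non-LRU movable page owned by a driver
 *   (mapping & 0x3) == 0x3   KSM page
 */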
# 454 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageKsm(const struct page *page) { return 0; }
u64 stable_page_flags(struct page *page);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageUptodate(struct page *page)
{
int ret;
page = compound_head(page);
ret = (__builtin_constant_p((PG_uptodate)) ? constant_test_bit((PG_uptodate), (&(page)->flags)) : variable_test_bit((PG_uptodate), (&(page)->flags)));
# 472 "./include/linux/page-flags.h"
if (ret)
__asm__ __volatile__("" : : : "memory");
return ret;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageUptodate(struct page *page)
{
((void)(sizeof(( long)(PageTail(page)))));
__asm__ __volatile__("" : : : "memory");
__set_bit(PG_uptodate, &page->flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageUptodate(struct page *page)
{
((void)(sizeof(( long)(PageTail(page)))));
__asm__ __volatile__("" : : : "memory");
set_bit(PG_uptodate, &page->flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &({ ((void)(sizeof(( long)(1 && PageTail(page))))); ({ ((void)(sizeof(( long)(PagePoisoned(compound_head(page)))))); compound_head(page); }); })->flags); }
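/*
 * The bare __asm__ __volatile__("" : : : "memory") statements in
 * PageUptodate(), __SetPageUptodate() and SetPageUptodate() are what
 * smp_rmb()/smp_wmb() expand to on x86-64, where only a compiler barrier is
 * needed.  The pairing guarantees that a reader who observes PG_uptodate also
 * observes the page contents written before the flag was set:
 *
 *   writer: fill the page data, then SetPageUptodate(page);
 *   reader: if (PageUptodate(page)) { the data written above is visible }
 */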
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_page_writeback(struct page *page)
{
__test_set_page_writeback(page, false);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_page_writeback_keepwrite(struct page *page)
{
__test_set_page_writeback(page, true);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageHead(struct page *page) { return (__builtin_constant_p((PG_head)) ? constant_test_bit((PG_head), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags)) : variable_test_bit((PG_head), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageHead(struct page *page) { clear_bit(PG_head, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void set_compound_head(struct page *page, struct page *head)
{
({ union { typeof(page->compound_head) __val; char __c[1]; } __u = { .__val = ( typeof(page->compound_head)) ((unsigned long)head + 1) }; __write_once_size(&(page->compound_head), __u.__c, sizeof(page->compound_head)); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void clear_compound_head(struct page *page)
{
({ union { typeof(page->compound_head) __val; char __c[1]; } __u = { .__val = ( typeof(page->compound_head)) (0) }; __write_once_size(&(page->compound_head), __u.__c, sizeof(page->compound_head)); __u.__val; });
}
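/*
 * set_compound_head()/clear_compound_head() show the tail-page encoding:
 * page->compound_head holds the head page pointer with bit 0 set (hence the
 * "+ 1"), stored through the expanded WRITE_ONCE().  compound_head(), used by
 * most of the accessors earlier in this header, is the inverse; paraphrasing
 * its definition from page-flags.h:
 *
 *   unsigned long head = READ_ONCE(page->compound_head);
 *   if (head & 1)
 *           return (struct page *)(head - 1);
 *   return page;
 */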
# 540 "./include/linux/page-flags.h"
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
# 652 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageTransHuge(const struct page *page) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageTransCompound(const struct page *page) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageTransCompoundMap(const struct page *page) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageTransTail(const struct page *page) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageDoubleMap(const struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void SetPageDoubleMap(struct page *page) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ClearPageDoubleMap(struct page *page) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int TestSetPageDoubleMap(struct page *page) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int TestClearPageDoubleMap(struct page *page) { return 0; }
# 681 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_has_type(struct page *page)
{
return (int)page->page_type < -128;
}
# 706 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageBuddy(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000080)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageBuddy(struct page *page) { ((void)(sizeof(( long)(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000))))); page->page_type &= ~0x00000080; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageBuddy(struct page *page) { ((void)(sizeof(( long)(!PageBuddy(page))))); page->page_type |= 0x00000080; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageBalloon(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000100)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageBalloon(struct page *page) { ((void)(sizeof(( long)(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000))))); page->page_type &= ~0x00000100; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageBalloon(struct page *page) { ((void)(sizeof(( long)(!PageBalloon(page))))); page->page_type |= 0x00000100; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageKmemcg(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000200)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageKmemcg(struct page *page) { ((void)(sizeof(( long)(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000))))); page->page_type &= ~0x00000200; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageKmemcg(struct page *page) { ((void)(sizeof(( long)(!PageKmemcg(page))))); page->page_type |= 0x00000200; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageTable(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000400)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageTable(struct page *page) { ((void)(sizeof(( long)(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000))))); page->page_type &= ~0x00000400; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageTable(struct page *page) { ((void)(sizeof(( long)(!PageTable(page))))); page->page_type |= 0x00000400; }
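/*
 * PageBuddy()/PageBalloon()/PageKmemcg()/PageTable() use page->page_type
 * instead of a page->flags bit.  page_type starts life as 0xffffffff and a
 * type is recorded by clearing its flag bit, which is why __SetPage*() does
 * "page_type &= ~FLAG" while the test checks
 * (page_type & (0xf0000000 | FLAG)) == 0xf0000000.  page_has_type() above is
 * the generic form: the signed comparison "< -128" becomes true once any flag
 * bit at or above 0x80 has been cleared from the all-ones initial value.
 */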
extern bool is_free_buddy_page(struct page *page);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageIsolated(struct page *page) { return (__builtin_constant_p((PG_isolated)) ? constant_test_bit((PG_isolated), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags)) : variable_test_bit((PG_isolated), (&({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags))); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageIsolated(struct page *page) { __set_bit(PG_isolated, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageIsolated(struct page *page) { __clear_bit(PG_isolated, &({ ((void)(sizeof(( long)(PagePoisoned(page))))); page; })->flags); };
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageSlabPfmemalloc(struct page *page)
{
((void)(sizeof(( long)(!PageSlab(page)))));
return PageActive(page);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void SetPageSlabPfmemalloc(struct page *page)
{
((void)(sizeof(( long)(!PageSlab(page)))));
SetPageActive(page);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __ClearPageSlabPfmemalloc(struct page *page)
{
((void)(sizeof(( long)(!PageSlab(page)))));
__ClearPageActive(page);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ClearPageSlabPfmemalloc(struct page *page)
{
((void)(sizeof(( long)(!PageSlab(page)))));
ClearPageActive(page);
}
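/*
 * The *SlabPfmemalloc() helpers have no flag of their own: slab pages never
 * sit on the LRU, so PG_active is unused for them and is re-purposed to mark
 * pages handed out from the pfmemalloc reserves, which is why these wrappers
 * call PageActive()/SetPageActive() under a PageSlab() check.
 */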
# 794 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_has_private(struct page *page)
{
return !!(page->flags & (1UL << PG_private | 1UL << PG_private_2));
}
# 8 "./include/linux/page_ref.h" 2
extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;
# 41 "./include/linux/page_ref.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __page_ref_set(struct page *page, int v)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __page_ref_mod(struct page *page, int v)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __page_ref_unfreeze(struct page *page, int v)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_ref_count(struct page *page)
{
return atomic_read(&page->_refcount);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_count(struct page *page)
{
return atomic_read(&compound_head(page)->_refcount);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_page_count(struct page *page, int v)
{
atomic_set(&page->_refcount, v);
if (false)
__page_ref_set(page, v);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_page_count(struct page *page)
{
set_page_count(page, 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_ref_add(struct page *page, int nr)
{
atomic_add(nr, &page->_refcount);
if (false)
__page_ref_mod(page, nr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_ref_sub(struct page *page, int nr)
{
atomic_sub(nr, &page->_refcount);
if (false)
__page_ref_mod(page, -nr);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_ref_inc(struct page *page)
{
atomic_inc(&page->_refcount);
if (false)
__page_ref_mod(page, 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_ref_dec(struct page *page)
{
atomic_dec(&page->_refcount);
if (false)
__page_ref_mod(page, -1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);
if (false)
__page_ref_mod_and_test(page, -nr, ret);
return ret;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_ref_inc_return(struct page *page)
{
int ret = atomic_add_return(1, (&page->_refcount));
if (false)
__page_ref_mod_and_return(page, 1, ret);
return ret;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
if (false)
__page_ref_mod_and_test(page, -1, ret);
return ret;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_ref_dec_return(struct page *page)
{
int ret = atomic_sub_return(1, (&page->_refcount));
if (false)
__page_ref_mod_and_return(page, -1, ret);
return ret;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_ref_add_unless(struct page *page, int nr, int u)
{
int ret = atomic_add_unless(&page->_refcount, nr, u);
if (false)
__page_ref_mod_unless(page, nr, ret);
return ret;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_ref_freeze(struct page *page, int count)
{
int ret = __builtin_expect(!!(atomic_cmpxchg(&page->_refcount, count, 0) == count), 1);
if (false)
__page_ref_freeze(page, count, ret);
return ret;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_ref_unfreeze(struct page *page, int count)
{
((void)(sizeof(( long)(page_count(page) != 0))));
((void)(sizeof(( long)(count == 0))));
do { do { extern void __compiletime_assert_178(void) ; if (!((sizeof(*&(&page->_refcount)->counter) == sizeof(char) || sizeof(*&(&page->_refcount)->counter) == sizeof(short) || sizeof(*&(&page->_refcount)->counter) == sizeof(int) || sizeof(*&(&page->_refcount)->counter) == sizeof(long)))) __compiletime_assert_178(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&(&page->_refcount)->counter) __val; char __c[1]; } __u = { .__val = ( typeof(*&(&page->_refcount)->counter)) ((count)) }; __write_once_size(&(*&(&page->_refcount)->counter), __u.__c, sizeof(*&(&page->_refcount)->counter)); __u.__val; }); } while (0);
if (false)
__page_ref_unfreeze(page, count);
}
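/*
 * The page_ref_*() helpers are thin wrappers around atomic operations on
 * page->_refcount.  The "if (false)" branches are page_ref_tracepoint_active()
 * compiled out because CONFIG_DEBUG_PAGE_REF is not set, so the empty
 * __page_ref_*() stubs above are never reached.  page_ref_freeze(page, count)
 * atomically swaps an expected refcount for 0 via cmpxchg, and
 * page_ref_unfreeze() publishes the new count with a release-style store
 * (compiler barrier plus WRITE_ONCE() on x86).  A usage sketch with a
 * hypothetical expected count:
 *
 *   if (page_ref_freeze(page, 2)) {          // succeeds only if _refcount was exactly 2
 *           // refcount is now 0, so get_page_unless_zero() users back off
 *           page_ref_unfreeze(page, 2);      // restore the count when done
 *   }
 */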
# 27 "./include/linux/mm.h" 2
# 1 "./include/linux/memremap.h" 1
# 1 "./include/linux/ioport.h" 1
# 19 "./include/linux/ioport.h"
struct resource {
resource_size_t start;
resource_size_t end;
const char *name;
unsigned long flags;
unsigned long desc;
struct resource *parent, *sibling, *child;
};
# 127 "./include/linux/ioport.h"
enum {
IORES_DESC_NONE = 0,
IORES_DESC_CRASH_KERNEL = 1,
IORES_DESC_ACPI_TABLES = 2,
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_DEVICE_PUBLIC_MEMORY = 7,
};
# 169 "./include/linux/ioport.h"
extern struct resource ioport_resource;
extern struct resource iomem_resource;
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name);
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int remove_resource(struct resource *old);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data);
struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long resource_type(const struct resource *res)
{
return res->flags & 0x00001f00;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long resource_ext_type(const struct resource *res)
{
return res->flags & 0x01000000;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool resource_contains(struct resource *r1, struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
if (r1->flags & 0x20000000 || r2->flags & 0x20000000)
return false;
return r1->start <= r2->start && r1->end >= r2->end;
}
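/*
 * struct resource models one node of the kernel's hierarchical I/O memory and
 * I/O port trees (iomem_resource / ioport_resource).  The [start, end] range
 * is inclusive at both ends, which is why resource_size() is
 * "end - start + 1": a region with .start = 0xfed00000 and .end = 0xfed003ff
 * has resource_size() == 0x400.  resource_contains() additionally refuses to
 * compare resources of different types or ones still flagged unset
 * (the 0x20000000 flag, IORESOURCE_UNSET).
 */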
# 228 "./include/linux/ioport.h"
extern struct resource * __request_region(struct resource *,
resource_size_t start,
resource_size_t n,
const char *name, int flags);
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
struct device;
extern int devm_request_resource(struct device *dev, struct resource *root,
struct resource *new);
extern void devm_release_resource(struct device *dev, struct resource *new);
extern struct resource * __devm_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name);
extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern bool iomem_is_exclusive(u64 addr);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *));
extern int
walk_mem_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool resource_overlaps(struct resource *r1, struct resource *r2)
{
return (r1->start <= r2->end && r1->end >= r2->start);
}
# 5 "./include/linux/memremap.h" 2
struct resource;
struct device;
# 18 "./include/linux/memremap.h"
struct vmem_altmap {
const unsigned long base_pfn;
const unsigned long reserve;
unsigned long free;
unsigned long align;
unsigned long alloc;
};
# 59 "./include/linux/memremap.h"
enum memory_type {
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_PUBLIC,
MEMORY_DEVICE_FS_DAX,
MEMORY_DEVICE_PCI_P2PDMA,
};
# 75 "./include/linux/memremap.h"
typedef void (*dev_page_free_t)(struct page *page, void *data);
# 88 "./include/linux/memremap.h"
struct dev_pagemap {
dev_page_free_t page_free;
struct vmem_altmap altmap;
bool altmap_valid;
struct resource res;
struct percpu_ref *ref;
void (*kill)(struct percpu_ref *ref);
struct device *dev;
void *data;
enum memory_type type;
u64 pci_p2pdma_bus_offset;
};
# 109 "./include/linux/memremap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
{
({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("./include/linux/memremap.h"), "i" (117), "i" ((1 << 0)|((1 << 1)|((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (14)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return ERR_PTR(-6);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vmem_altmap_free(struct vmem_altmap *altmap,
unsigned long nr_pfns)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
percpu_ref_put(pgmap->ref);
}
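/*
 * These devm_memremap_pages()/get_dev_pagemap() bodies are the stubs built
 * when CONFIG_ZONE_DEVICE is not enabled: the first warns (the inline asm
 * blob is the x86 bug-table machinery behind WARN) and returns ERR_PTR(-6),
 * i.e. -ENXIO, while the pagemap lookup simply returns NULL.  struct
 * dev_pagemap is still defined so code that merely references it keeps
 * compiling.
 */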
# 28 "./include/linux/mm.h" 2
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;
void init_mm_internals(void);
# 48 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_max_mapnr(unsigned long limit) { }
extern atomic_long_t _totalram_pages;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long totalram_pages(void)
{
return (unsigned long)atomic_long_read(&_totalram_pages);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void totalram_pages_inc(void)
{
atomic_long_inc(&_totalram_pages);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void totalram_pages_dec(void)
{
atomic_long_dec(&_totalram_pages);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void totalram_pages_add(long count)
{
atomic_long_add(count, &_totalram_pages);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void totalram_pages_set(long val)
{
atomic_long_set(&_totalram_pages, val);
}
extern void * high_memory;
extern int page_cluster;
extern int sysctl_legacy_va_layout;
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __attribute__((__section__(".data..read_mostly")));
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __attribute__((__section__(".data..read_mostly")));
# 1 "./arch/x86/include/asm/pgtable.h" 1
# 27 "./arch/x86/include/asm/pgtable.h"
extern pgd_t early_top_pgt[512];
int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __early_make_pgtable(unsigned long address, pmdval_t pmd);
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);
# 47 "./arch/x86/include/asm/pgtable.h"
extern unsigned long empty_zero_page[((1UL) << 12) / sizeof(unsigned long)];
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
extern struct mm_struct *pgd_page_get_mm(struct page *page);
extern pmdval_t early_pmd_flags;
# 121 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_dirty(pte_t pte)
{
return pte_flags(pte) & (((pteval_t)(1)) << 6);
}
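/*
 * The large conditionals below are the inlined expansion of a CPU feature
 * test for bit (16*32 + 4), i.e. X86_FEATURE_OSPKE: PKRU is only read or
 * written when OS support for protection keys is enabled; otherwise
 * read_pkru() returns 0 and write_pkru() is a no-op.
 */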
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 read_pkru(void)
{
if ((__builtin_constant_p((16*32+ 4)) && ( ((((16*32+ 4))>>5)==(0) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+ 4))>>5)==(1) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+ 4))>>5)==(2) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(3) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+ 4))>>5)==(4) && (1UL<<(((16*32+ 4))&31) & (0) )) || ((((16*32+ 4))>>5)==(5) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(6) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(7) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(8) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(9) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(10) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(11) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(12) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(13) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(14) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(15) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(16) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(17) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(18) && (1UL<<(((16*32+ 4))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p(((16*32+ 4))) ? constant_test_bit(((16*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((16*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))
return __read_pkru();
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_pkru(u32 pkru)
{
if ((__builtin_constant_p((16*32+ 4)) && ( ((((16*32+ 4))>>5)==(0) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+ 4))>>5)==(1) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+ 4))>>5)==(2) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(3) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+ 4))>>5)==(4) && (1UL<<(((16*32+ 4))&31) & (0) )) || ((((16*32+ 4))>>5)==(5) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(6) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(7) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(8) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(9) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(10) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(11) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(12) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(13) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(14) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(15) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(16) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(17) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(18) && (1UL<<(((16*32+ 4))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p(((16*32+ 4))) ? constant_test_bit(((16*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((16*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))
__write_pkru(pkru);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_young(pte_t pte)
{
return pte_flags(pte) & (((pteval_t)(1)) << 5);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_dirty(pmd_t pmd)
{
return pmd_flags(pmd) & (((pteval_t)(1)) << 6);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_young(pmd_t pmd)
{
return pmd_flags(pmd) & (((pteval_t)(1)) << 5);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_dirty(pud_t pud)
{
return pud_flags(pud) & (((pteval_t)(1)) << 6);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_young(pud_t pud)
{
return pud_flags(pud) & (((pteval_t)(1)) << 5);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_write(pte_t pte)
{
return pte_flags(pte) & (((pteval_t)(1)) << 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_huge(pte_t pte)
{
return pte_flags(pte) & (((pteval_t)(1)) << 7);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_global(pte_t pte)
{
return pte_flags(pte) & (((pteval_t)(1)) << 8);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_exec(pte_t pte)
{
return !(pte_flags(pte) & (((pteval_t)(1)) << 63));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_special(pte_t pte)
{
return pte_flags(pte) & (((pteval_t)(1)) << 9);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 protnone_mask(u64 val);
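/*
 * Frame-number extraction: XOR-ing with protnone_mask() first undoes the
 * inversion applied to non-present (PROT_NONE) entries (see
 * pgtable-invert.h further down) before the physical frame bits are
 * masked out and shifted down by PAGE_SHIFT (12).
 */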
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pte_pfn(pte_t pte)
{
phys_addr_t pfn = native_pte_val(pte);
pfn ^= protnone_mask(pfn);
return (pfn & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1))))) >> 12;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pmd_pfn(pmd_t pmd)
{
phys_addr_t pfn = native_pmd_val(pmd);
pfn ^= protnone_mask(pfn);
return (pfn & pmd_pfn_mask(pmd)) >> 12;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pud_pfn(pud_t pud)
{
phys_addr_t pfn = native_pud_val(pud);
pfn ^= protnone_mask(pfn);
return (pfn & pud_pfn_mask(pud)) >> 12;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long p4d_pfn(p4d_t p4d)
{
return ((native_pgd_val((p4d).pgd)) & p4d_pfn_mask(p4d)) >> 12;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pgd_pfn(pgd_t pgd)
{
return (native_pgd_val(pgd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1))))) >> 12;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_large(p4d_t p4d)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_large(pmd_t pte)
{
return pmd_flags(pte) & (((pteval_t)(1)) << 7);
}
# 282 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_set_flags(pte_t pte, pteval_t set)
{
pteval_t v = native_pte_val(pte);
return native_make_pte(v | set);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
pteval_t v = native_pte_val(pte);
return native_make_pte(v & ~clear);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkclean(pte_t pte)
{
return pte_clear_flags(pte, (((pteval_t)(1)) << 6));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkold(pte_t pte)
{
return pte_clear_flags(pte, (((pteval_t)(1)) << 5));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_wrprotect(pte_t pte)
{
return pte_clear_flags(pte, (((pteval_t)(1)) << 1));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkexec(pte_t pte)
{
return pte_clear_flags(pte, (((pteval_t)(1)) << 63));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkdirty(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(1)) << 6) | (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkyoung(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(1)) << 5));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkwrite(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(1)) << 1));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkhuge(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(1)) << 7));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_clrhuge(pte_t pte)
{
return pte_clear_flags(pte, (((pteval_t)(1)) << 7));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkglobal(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(1)) << 8));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_clrglobal(pte_t pte)
{
return pte_clear_flags(pte, (((pteval_t)(1)) << 8));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkspecial(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(1)) << 9));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkdevmap(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(1)) << 9)|(((u64)(1)) << 58));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
pmdval_t v = native_pmd_val(pmd);
return native_make_pmd(v | set);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
pmdval_t v = native_pmd_val(pmd);
return native_make_pmd(v & ~clear);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkold(pmd_t pmd)
{
return pmd_clear_flags(pmd, (((pteval_t)(1)) << 5));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkclean(pmd_t pmd)
{
return pmd_clear_flags(pmd, (((pteval_t)(1)) << 6));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_wrprotect(pmd_t pmd)
{
return pmd_clear_flags(pmd, (((pteval_t)(1)) << 1));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkdirty(pmd_t pmd)
{
return pmd_set_flags(pmd, (((pteval_t)(1)) << 6) | (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkdevmap(pmd_t pmd)
{
return pmd_set_flags(pmd, (((u64)(1)) << 58));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd_set_flags(pmd, (((pteval_t)(1)) << 7));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkyoung(pmd_t pmd)
{
return pmd_set_flags(pmd, (((pteval_t)(1)) << 5));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkwrite(pmd_t pmd)
{
return pmd_set_flags(pmd, (((pteval_t)(1)) << 1));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_set_flags(pud_t pud, pudval_t set)
{
pudval_t v = native_pud_val(pud);
return native_make_pud(v | set);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
pudval_t v = native_pud_val(pud);
return native_make_pud(v & ~clear);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkold(pud_t pud)
{
return pud_clear_flags(pud, (((pteval_t)(1)) << 5));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkclean(pud_t pud)
{
return pud_clear_flags(pud, (((pteval_t)(1)) << 6));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_wrprotect(pud_t pud)
{
return pud_clear_flags(pud, (((pteval_t)(1)) << 1));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkdirty(pud_t pud)
{
return pud_set_flags(pud, (((pteval_t)(1)) << 6) | (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkdevmap(pud_t pud)
{
return pud_set_flags(pud, (((u64)(1)) << 58));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkhuge(pud_t pud)
{
return pud_set_flags(pud, (((pteval_t)(1)) << 7));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkyoung(pud_t pud)
{
return pud_set_flags(pud, (((pteval_t)(1)) << 5));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkwrite(pud_t pud)
{
return pud_set_flags(pud, (((pteval_t)(1)) << 1));
}
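/*
 * Soft-dirty tracking is disabled in this configuration, so the
 * soft-dirty "bit" expands to 0 and the helpers below are effectively
 * no-ops: the predicates always return 0 and the set/clear variants
 * return the entry unchanged.
 */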
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_soft_dirty(pte_t pte)
{
return pte_flags(pte) & (((pteval_t)(0)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_soft_dirty(pmd_t pmd)
{
return pmd_flags(pmd) & (((pteval_t)(0)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_soft_dirty(pud_t pud)
{
return pud_flags(pud) & (((pteval_t)(0)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mksoft_dirty(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
return pmd_set_flags(pmd, (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mksoft_dirty(pud_t pud)
{
return pud_set_flags(pud, (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_clear_soft_dirty(pte_t pte)
{
return pte_clear_flags(pte, (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
return pmd_clear_flags(pmd, (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_clear_soft_dirty(pud_t pud)
{
return pud_clear_flags(pud, (((pteval_t)(0))));
}
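/*
 * massage_pgprot() strips protection bits the CPU does not support
 * (__supported_pte_mask), but only for entries that have the Present bit
 * set; with the extra checking around line 545 of pgtable.h compiled out,
 * check_pgprot() reduces to massage_pgprot().
 */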
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprotval_t massage_pgprot(pgprot_t pgprot)
{
pgprotval_t protval = ((pgprot).pgprot);
if (protval & (((pteval_t)(1)) << 0))
protval &= __supported_pte_mask;
return protval;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprotval_t check_pgprot(pgprot_t pgprot)
{
pgprotval_t massaged_val = massage_pgprot(pgprot);
# 545 "./arch/x86/include/asm/pgtable.h"
return massaged_val;
}
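/*
 * Entry constructors: pfn_pte()/pfn_pmd()/pfn_pud() shift the frame
 * number up to a physical address, apply the PROT_NONE inversion mask
 * derived from the protection bits, mask to 4K/2M/1G alignment
 * respectively, and OR in the checked protection value.
 */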
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
phys_addr_t pfn = (phys_addr_t)page_nr << 12;
pfn ^= protnone_mask(((pgprot).pgprot));
pfn &= ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1))));
return native_make_pte(pfn | check_pgprot(pgprot));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
phys_addr_t pfn = (phys_addr_t)page_nr << 12;
pfn ^= protnone_mask(((pgprot).pgprot));
pfn &= (((signed long)(~(((1UL) << 21)-1))) & ((phys_addr_t)((1ULL << 52) - 1)));
return native_make_pmd(pfn | check_pgprot(pgprot));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
phys_addr_t pfn = (phys_addr_t)page_nr << 12;
pfn ^= protnone_mask(((pgprot).pgprot));
pfn &= (((signed long)(~(((1UL) << 30)-1))) & ((phys_addr_t)((1ULL << 52) - 1)));
return native_make_pud(pfn | check_pgprot(pgprot));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mknotpresent(pmd_t pmd)
{
return pfn_pmd(pmd_pfn(pmd),
((pgprot_t) { (pmd_flags(pmd) & ~((((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 8))) } ));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mknotpresent(pud_t pud)
{
return pfn_pud(pud_pfn(pud),
((pgprot_t) { (pud_flags(pud) & ~((((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 8))) } ));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
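/*
 * pte_modify()/pmd_modify() replace the protection bits while preserving
 * the frame number, the PWT/PCD caching bits (bits 3 and 4), the special,
 * Accessed, Dirty and devmap bits; flip_protnone_guard() then re-applies
 * the inversion if the change toggled the entry between present and
 * PROT_NONE.
 */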
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pteval_t val = native_pte_val(pte), oldval = val;
val &= (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(0))) | (((u64)(1)) << 58));
val |= check_pgprot(newprot) & ~(((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(0))) | (((u64)(1)) << 58));
val = flip_protnone_guard(oldval, val, ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1)))));
return native_make_pte(val);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmdval_t val = native_pmd_val(pmd), oldval = val;
val &= ((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(0))) | (((u64)(1)) << 58)) | (((pteval_t)(1)) << 7));
val |= check_pgprot(newprot) & ~((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(0))) | (((u64)(1)) << 58)) | (((pteval_t)(1)) << 7));
val = flip_protnone_guard(oldval, val, (((signed long)(~(((1UL) << 21)-1))) & ((phys_addr_t)((1ULL << 52) - 1))));
return native_make_pmd(val);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
pgprotval_t preservebits = ((oldprot).pgprot) & (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 52) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(0))) | (((u64)(1)) << 58));
pgprotval_t addbits = ((newprot).pgprot);
return ((pgprot_t) { (preservebits | addbits) } );
}
# 626 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t arch_filter_pgprot(pgprot_t prot)
{
return ((pgprot_t) { (massage_pgprot(prot)) } );
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_new_memtype_allowed(u64 paddr, unsigned long size,
enum page_cache_mode pcm,
enum page_cache_mode new_pcm)
{
if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
return 1;
# 649 "./arch/x86/include/asm/pgtable.h"
if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WC &&
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WT &&
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WT &&
new_pcm == _PAGE_CACHE_MODE_WC)) {
return 0;
}
return 1;
}
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
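/*
 * The oversized conditional below is the expansion of a static CPU
 * feature check for bit (7*32 + 11), i.e. X86_FEATURE_PTI. Without
 * page-table isolation the pgd value is returned unchanged; with it,
 * __pti_set_user_pgtbl() mirrors the update into the user-space copy of
 * the page tables.
 */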
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
if (!( __builtin_constant_p((__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p((( 7*32+11))) ? constant_test_bit((( 7*32+11)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit((( 7*32+11)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p((( 7*32+11))) ? 
constant_test_bit((( 7*32+11)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit((( 7*32+11)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : ({ __label__ t_yes; __label__ t_no; __label__ l_done; bool ret; asm goto("1: jmp 6f\n" "2:\n" ".skip -(((5f-4f) - (2b-1b)) > 0) * " "((5f-4f) - (2b-1b)),0x90\n" "3:\n" ".section .altinstructions,\"a\"\n" " .long 1b - .\n" " .long 4f - .\n" " .word %P[always]\n" " .byte 3b - 1b\n" " .byte 5f - 4f\n" " .byte 3b - 2b\n" ".previous\n" ".section .altinstr_replacement,\"ax\"\n" "4: jmp %l[t_no]\n" "5:\n" ".previous\n" ".section .altinstructions,\"a\"\n" " .long 1b - .\n" " .long 0\n" " .word %P[feature]\n" " .byte 3b - 1b\n" " .byte 0\n" " .byte 0\n" ".previous\n" ".section .altinstr_aux,\"ax\"\n" "6:\n" " testb %[bitnum],%[cap_byte]\n" " jnz %l[t_yes]\n" " jmp %l[t_no]\n" ".previous\n" : : [feature] "i" (( 7*32+11)), [always] "i" (( 3*32+21)), [bitnum] "i" (1 << (( 7*32+11) & 7)), [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[( 7*32+11) >> 3]) : : t_yes, t_no); t_yes: ret = true; goto l_done; t_no: ret = false; l_done: ret; }) ))
return pgd;
return __pti_set_user_pgtbl(pgdp, pgd);
}
# 693 "./arch/x86/include/asm/pgtable.h"
# 1 "./arch/x86/include/asm/pgtable_64.h" 1
# 19 "./arch/x86/include/asm/pgtable_64.h"
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512 * 2];
extern pgd_t init_top_pgt[];
extern void paging_init(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sync_initial_page_table(void) { }
# 54 "./arch/x86/include/asm/pgtable_64.h"
struct mm_struct;
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
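/*
 * Native page-table setters: plain WRITE_ONCE() stores of the new entry.
 * The *_get_and_clear() variants below atomically exchange the entry with
 * zero (the inline-asm xchg) so concurrent hardware Accessed/Dirty
 * updates are not lost.
 */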
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pte(pte_t *ptep, pte_t pte)
{
({ union { typeof(*ptep) __val; char __c[1]; } __u = { .__val = ( typeof(*ptep)) (pte) }; __write_once_size(&(*ptep), __u.__c, sizeof(*ptep)); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
native_set_pte(ptep, native_make_pte(0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
native_set_pte(ptep, pte);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
({ union { typeof(*pmdp) __val; char __c[1]; } __u = { .__val = ( typeof(*pmdp)) (pmd) }; __write_once_size(&(*pmdp), __u.__c, sizeof(*pmdp)); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_pmd_clear(pmd_t *pmd)
{
native_set_pmd(pmd, native_make_pmd(0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t native_ptep_get_and_clear(pte_t *xp)
{
return native_make_pte(({ typeof(&xp->pte) __ai_ptr = (&xp->pte); kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = (((0))); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); }));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
return native_make_pmd(({ typeof(&xp->pmd) __ai_ptr = (&xp->pmd); kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = (((0))); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); }));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pud(pud_t *pudp, pud_t pud)
{
({ union { typeof(*pudp) __val; char __c[1]; } __u = { .__val = ( typeof(*pudp)) (pud) }; __write_once_size(&(*pudp), __u.__c, sizeof(*pudp)); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_pud_clear(pud_t *pud)
{
native_set_pud(pud, native_make_pud(0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t native_pudp_get_and_clear(pud_t *xp)
{
return native_make_pud(({ typeof(&xp->pud) __ai_ptr = (&xp->pud); kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = (((0))); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); }));
# 134 "./arch/x86/include/asm/pgtable_64.h"
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
pgd_t pgd;
if (0 || !1) {
({ union { typeof(*p4dp) __val; char __c[1]; } __u = { .__val = ( typeof(*p4dp)) (p4d) }; __write_once_size(&(*p4dp), __u.__c, sizeof(*p4dp)); __u.__val; });
return;
}
pgd = native_make_pgd(native_p4d_val(p4d));
pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
({ union { typeof(*p4dp) __val; char __c[1]; } __u = { .__val = ( typeof(*p4dp)) (native_make_p4d(native_pgd_val(pgd))) }; __write_once_size(&(*p4dp), __u.__c, sizeof(*p4dp)); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_p4d_clear(p4d_t *p4d)
{
native_set_p4d(p4d, native_make_p4d(0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
({ union { typeof(*pgdp) __val; char __c[1]; } __u = { .__val = ( typeof(*pgdp)) (pti_set_user_pgtbl(pgdp, pgd)) }; __write_once_size(&(*pgdp), __u.__c, sizeof(*pgdp)); __u.__val; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_pgd_clear(pgd_t *pgd)
{
native_set_pgd(pgd, native_make_pgd(0));
}
extern void sync_global_pgds(unsigned long start, unsigned long end);
# 238 "./arch/x86/include/asm/pgtable_64.h"
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);
# 258 "./arch/x86/include/asm/pgtable_64.h"
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gup_fast_permitted(unsigned long start, int nr_pages,
int write)
{
unsigned long len, end;
len = (unsigned long)nr_pages << 12;
end = start + len;
if (end < start)
return false;
if (end >> 47)
return false;
return true;
}
# 1 "./arch/x86/include/asm/pgtable-invert.h" 1
# 16 "./arch/x86/include/asm/pgtable-invert.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __pte_needs_invert(u64 val)
{
return val && !(val & (((pteval_t)(1)) << 0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 protnone_mask(u64 val)
{
return __pte_needs_invert(val) ? ~0ull : 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
{
if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
val = (val & ~mask) | (~val & mask);
return val;
}
# 277 "./arch/x86/include/asm/pgtable_64.h" 2
# 694 "./arch/x86/include/asm/pgtable.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_none(pte_t pte)
{
return !(pte.pte & ~(((((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 5))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_same(pte_t a, pte_t b)
{
return a.pte == b.pte;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_present(pte_t a)
{
return pte_flags(a) & ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_devmap(pte_t a)
{
return (pte_flags(a) & (((u64)(1)) << 58)) == (((u64)(1)) << 58);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pte_accessible(struct mm_struct *mm, pte_t a)
{
if (pte_flags(a) & (((pteval_t)(1)) << 0))
return true;
if ((pte_flags(a) & (((pteval_t)(1)) << 8)) &&
mm_tlb_flush_pending(mm))
return true;
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_present(pmd_t pmd)
{
return pmd_flags(pmd) & ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8) | (((pteval_t)(1)) << 7));
}
# 767 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_none(pmd_t pmd)
{
unsigned long val = native_pmd_val(pmd);
return (val & ~((((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 5))) == 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)((void *)((unsigned long)(native_pmd_val(pmd) & pmd_pfn_mask(pmd))+((unsigned long)page_offset_base)));
}
# 792 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pmd_index(unsigned long address)
{
return (address >> 21) & (512 - 1);
}
# 812 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pte_index(unsigned long address)
{
return (address >> 12) & (512 - 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_bad(pmd_t pmd)
{
return (pmd_flags(pmd) & ~(((pteval_t)(1)) << 2)) != ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(0ULL))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pages_to_mb(unsigned long npg)
{
return npg >> (20 - 12);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_none(pud_t pud)
{
return (native_pud_val(pud) & ~(((((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 5)))) == 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_present(pud_t pud)
{
return pud_flags(pud) & (((pteval_t)(1)) << 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pud_page_vaddr(pud_t pud)
{
return (unsigned long)((void *)((unsigned long)(native_pud_val(pud) & pud_pfn_mask(pud))+((unsigned long)page_offset_base)));
}
# 855 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_large(pud_t pud)
{
return (native_pud_val(pud) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0))) ==
((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_bad(pud_t pud)
{
return (pud_flags(pud) & ~(((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(0ULL)))) | (((pteval_t)(1)) << 2))) != 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pud_index(unsigned long address)
{
return (address >> 30) & (512 - 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_none(p4d_t p4d)
{
return (native_p4d_val(p4d) & ~(((((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 5)))) == 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_present(p4d_t p4d)
{
return p4d_flags(p4d) & (((pteval_t)(1)) << 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long p4d_page_vaddr(p4d_t p4d)
{
return (unsigned long)((void *)((unsigned long)((native_pgd_val((p4d).pgd)) & p4d_pfn_mask(p4d))+((unsigned long)page_offset_base)));
}
# 905 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_bad(p4d_t p4d)
{
unsigned long ignore_flags = ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(0ULL)))) | (((pteval_t)(1)) << 2);
if (1)
ignore_flags |= (((pteval_t)(1)) << 63);
return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long p4d_index(unsigned long address)
{
return (address >> 39) & (1 - 1);
}
# 1011 "./arch/x86/include/asm/pgtable.h"
extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
extern pgd_t trampoline_pgd_entry;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) init_trampoline_default(void)
{
trampoline_pgd_entry = init_top_pgt[(((page_offset_base) >> 39) & (512 - 1))];
}
void __attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) init_trampoline(void);
# 1034 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
pte_t res = *ptep;
native_pte_clear(((void *)0), 0, ptep);
return res;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
pmd_t res = *pmdp;
native_pmd_clear(pmdp);
return res;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
pud_t res = *pudp;
native_pud_clear(pudp);
return res;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep , pte_t pte)
{
native_set_pte(ptep, pte);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
native_set_pmd(pmdp, pmd);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
native_set_pud(pudp, pud);
}
# 1084 "./arch/x86/include/asm/pgtable.h"
struct vm_area_struct;
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty);
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t pte = native_ptep_get_and_clear(ptep);
return pte;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
int full)
{
pte_t pte;
if (full) {
pte = native_local_ptep_get_and_clear(ptep);
} else {
pte = ptep_get_and_clear(mm, addr, ptep);
}
return pte;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
clear_bit(1, (unsigned long *)&ptep->pte);
}
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
pud_t entry, int dirty);
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp);
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_write(pmd_t pmd)
{
return pmd_flags(pmd) & (((pteval_t)(1)) << 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
return native_pmdp_get_and_clear(pmdp);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pud_t *pudp)
{
return native_pudp_get_and_clear(pudp);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
clear_bit(1, (unsigned long *)pmdp);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_write(pud_t pud)
{
return pud_flags(pud) & (((pteval_t)(1)) << 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
if (1) {
return ({ typeof(pmdp) __ai_ptr = (pmdp); kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = (((pmd))); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); });
} else {
pmd_t old = *pmdp;
({ union { typeof(*pmdp) __val; char __c[1]; } __u = { .__val = ( typeof(*pmdp)) (pmd) }; __write_once_size(&(*pmdp), __u.__c, sizeof(*pmdp)); __u.__val; });
return old;
}
}
# 1209 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pgdp_maps_userspace(void *__ptr)
{
unsigned long ptr = (unsigned long)__ptr;
return (((ptr & ~(~(((1UL) << 12)-1))) / sizeof(pgd_t)) < ((((1UL) << 12) / 2) / sizeof(pgd_t)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_large(pgd_t pgd) { return 0; }
# 1231 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *ptr_set_bit(void *ptr, int bit)
{
unsigned long __ptr = (unsigned long)ptr;
__ptr |= (1UL << (bit));
return (void *)__ptr;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *ptr_clear_bit(void *ptr, int bit)
{
unsigned long __ptr = (unsigned long)ptr;
__ptr &= ~(1UL << (bit));
return (void *)__ptr;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
return ptr_set_bit(pgdp, 12);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
return ptr_clear_bit(pgdp, 12);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
return ptr_set_bit(p4dp, 12);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
return ptr_clear_bit(p4dp, 12);
}
# 1277 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
({ size_t __len = (count * sizeof(pgd_t)); void *__ret; if (__builtin_constant_p(count * sizeof(pgd_t)) && __len >= 64) __ret = __memcpy((dst), (src), __len); else __ret = __builtin_memcpy((dst), (src), __len); __ret; });
if (!( __builtin_constant_p((__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p((( 7*32+11))) ? constant_test_bit((( 7*32+11)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit((( 7*32+11)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p((( 7*32+11))) ? 
constant_test_bit((( 7*32+11)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit((( 7*32+11)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : ({ __label__ t_yes; __label__ t_no; __label__ l_done; bool ret; asm goto("1: jmp 6f\n" "2:\n" ".skip -(((5f-4f) - (2b-1b)) > 0) * " "((5f-4f) - (2b-1b)),0x90\n" "3:\n" ".section .altinstructions,\"a\"\n" " .long 1b - .\n" " .long 4f - .\n" " .word %P[always]\n" " .byte 3b - 1b\n" " .byte 5f - 4f\n" " .byte 3b - 2b\n" ".previous\n" ".section .altinstr_replacement,\"ax\"\n" "4: jmp %l[t_no]\n" "5:\n" ".previous\n" ".section .altinstructions,\"a\"\n" " .long 1b - .\n" " .long 0\n" " .word %P[feature]\n" " .byte 3b - 1b\n" " .byte 0\n" " .byte 0\n" ".previous\n" ".section .altinstr_aux,\"ax\"\n" "6:\n" " testb %[bitnum],%[cap_byte]\n" " jnz %l[t_yes]\n" " jmp %l[t_no]\n" ".previous\n" : : [feature] "i" (( 7*32+11)), [always] "i" (( 3*32+21)), [bitnum] "i" (1 << (( 7*32+11) & 7)), [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[( 7*32+11) >> 3]) : : t_yes, t_no); t_yes: ret = true; goto l_done; t_no: ret = false; l_done: ret; }) ))
return;
({ size_t __len = (count * sizeof(pgd_t)); void *__ret; if (__builtin_constant_p(count * sizeof(pgd_t)) && __len >= 64) __ret = __memcpy((kernel_to_user_pgdp(dst)), (kernel_to_user_pgdp(src)), __len); else __ret = __builtin_memcpy((kernel_to_user_pgdp(dst)), (kernel_to_user_pgdp(src)), __len); __ret; });
}
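/*
 * In page_level_shift() the huge ternary chains are the constant-folded
 * expansion of ilog2(512), which evaluates to 9, so the function computes
 * (12 - 9) + level * 9 -- presumably 12/21/30/39 for the 4K/2M/1G/512G
 * page-table levels.
 */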
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_level_shift(enum pg_level level)
{
return (12 - ( __builtin_constant_p(512) ? ( __builtin_constant_p(512) ? ( (512) < 2 ? 0 : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(512) <= 4) ? __ilog2_u32(512) : __ilog2_u64(512) )) + level * ( __builtin_constant_p(512) ? ( __builtin_constant_p(512) ? ( (512) < 2 ? 0 : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 
8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(512) <= 4) ? __ilog2_u32(512) : __ilog2_u64(512) );
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long page_level_size(enum pg_level level)
{
return 1UL << page_level_shift(level);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long page_level_mask(enum pg_level level)
{
return ~(page_level_size(level) - 1);
}
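/*
 * page_level_shift() above is likely the constant-folded form of
 * (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT; the giant ternary is just
 * ilog2(512) == 9 evaluated at compile time.  Worked out (assuming the usual
 * enum pg_level numbering where PG_LEVEL_4K == 1):
 *
 *   page_level_shift(level) = 3 + 9 * level
 *     PG_LEVEL_4K (1) -> 12,  PG_LEVEL_2M (2) -> 21,  PG_LEVEL_1G (3) -> 30
 *   page_level_size(PG_LEVEL_2M) = 1UL << 21 = 2 MiB
 *   page_level_mask(PG_LEVEL_2M) = ~(2 MiB - 1) = 0xffffffffffe00000
 */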
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_mmu_cache(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_mmu_cache_pud(struct vm_area_struct *vma,
unsigned long addr, pud_t *pud)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_swp_mksoft_dirty(pte_t pte)
{
return pte_set_flags(pte, (((pteval_t)(0))));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_swp_soft_dirty(pte_t pte)
{
return pte_flags(pte) & (((pteval_t)(0)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
return pte_clear_flags(pte, (((pteval_t)(0))));
}
# 1358 "./arch/x86/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __pkru_allows_read(u32 pkru, u16 pkey)
{
int pkru_pkey_bits = pkey * 2;
return !(pkru & (0x1 << pkru_pkey_bits));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __pkru_allows_write(u32 pkru, u16 pkey)
{
int pkru_pkey_bits = pkey * 2;
return !(pkru & ((0x1|0x2) << pkru_pkey_bits));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 pte_flags_pkey(unsigned long pte_flags)
{
return (pte_flags & ((((pteval_t)(1)) << 59) | (((pteval_t)(1)) << 60) | (((pteval_t)(1)) << 61) | (((pteval_t)(1)) << 62))) >> 59;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __pkru_allows_pkey(u16 pkey, bool write)
{
u32 pkru = read_pkru();
if (!__pkru_allows_read(pkru, pkey))
return false;
if (write && !__pkru_allows_write(pkru, pkey))
return false;
return true;
}
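/*
 * PKRU layout assumed by the helpers above: each protection key owns two
 * bits in the PKRU register, bit 2*pkey (access-disable) and bit 2*pkey+1
 * (write-disable).  __pkru_allows_read() only requires the AD bit to be
 * clear, while __pkru_allows_write() masks with 0x3 so both AD and WD must
 * be clear.  E.g. for pkey 1 the relevant PKRU bits are bits 2 and 3.
 */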
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __pte_access_permitted(unsigned long pteval, bool write)
{
unsigned long need_pte_bits = (((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 2);
if (write)
need_pte_bits |= (((pteval_t)(1)) << 1);
if ((pteval & need_pte_bits) != need_pte_bits)
return 0;
return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pte_access_permitted(pte_t pte, bool write)
{
return __pte_access_permitted(native_pte_val(pte), write);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pmd_access_permitted(pmd_t pmd, bool write)
{
return __pte_access_permitted(native_pmd_val(pmd), write);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pud_access_permitted(pud_t pud, bool write)
{
return __pte_access_permitted(native_pud_val(pud), write);
}
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_has_pfn_modify_check(void)
{
return (__builtin_constant_p((((19*32 + (18))))) && ( ((((((19*32 + (18)))))>>5)==(0) && (1UL<<(((((19*32 + (18)))))&31) & ((1<<(( 0*32+ 0) & 31))|(1<<(( 0*32+ 3)) & 31)|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|(1<<(( 0*32+13)) & 31)|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((((19*32 + (18)))))>>5)==(1) && (1UL<<(((((19*32 + (18)))))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((((19*32 + (18)))))>>5)==(2) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(3) && (1UL<<(((((19*32 + (18)))))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((((19*32 + (18)))))>>5)==(4) && (1UL<<(((((19*32 + (18)))))&31) & (0) )) || ((((((19*32 + (18)))))>>5)==(5) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(6) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(7) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(8) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(9) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(10) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(11) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(12) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(13) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(14) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(15) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(16) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(17) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || ((((((19*32 + (18)))))>>5)==(18) && (1UL<<(((((19*32 + (18)))))&31) & 0 )) || (sizeof(struct { int:(-!!(19 != 19)); })) || (sizeof(struct { int:(-!!(19 != 19)); }))) ? 1 : (__builtin_constant_p(((((19*32 + (18)))))) ? constant_test_bit(((((19*32 + (18))))), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((((19*32 + (18))))), ((unsigned long *)((&boot_cpu_data)->x86_capability)))));
}
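/*
 * The expanded capability test above checks bit 19*32 + 18, which in this
 * kernel's layout is most likely X86_BUG_L1TF (the bug bits live in word
 * NCAPINTS == 19).  In other words arch_has_pfn_modify_check() reduces to
 * boot_cpu_has_bug(X86_BUG_L1TF), so PFN-modifying PTE updates are only
 * vetted on CPUs affected by L1 Terminal Fault.
 */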
# 1 "./include/asm-generic/pgtable.h" 1
# 196 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pte_clear_not_present_full(struct mm_struct *mm,
unsigned long address,
pte_t *ptep,
int full)
{
native_pte_clear(mm, address, ptep);
}
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep);
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pud_t *pudp);
# 271 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pudp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pud_t *pudp)
{
pud_t old_pud = *pudp;
set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
# 292 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
{
do { extern void __compiletime_assert_296(void) ; if (!(!(1))) __compiletime_assert_296(); } while (0);
return *pmdp;
}
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
# 328 "./include/asm-generic/pgtable.h"
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
# 346 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_unused(pte_t pte)
{
return 0;
}
# 378 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return native_pmd_val(pmd_a) == native_pmd_val(pmd_b);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_same(pud_t pud_a, pud_t pud_b)
{
return native_pud_val(pud_a) == native_pud_val(pud_b);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
return (native_pgd_val((p4d_a).pgd)) == (native_pgd_val((p4d_b).pgd));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
return native_pgd_val(pgd_a) == native_pgd_val(pgd_b);
}
# 450 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_do_swap_page(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
pte_t pte, pte_t oldpte)
{
}
# 468 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_unmap_one(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
pte_t orig_pte)
{
return 0;
}
# 560 "./include/asm-generic/pgtable.h"
void pgd_clear_bad(pgd_t *);
void p4d_clear_bad(p4d_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_none_or_clear_bad(pgd_t *pgd)
{
if (pgd_none(*pgd))
return 1;
if (__builtin_expect(!!(pgd_bad(*pgd)), 0)) {
pgd_clear_bad(pgd);
return 1;
}
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_none_or_clear_bad(p4d_t *p4d)
{
if (p4d_none(*p4d))
return 1;
if (__builtin_expect(!!(p4d_bad(*p4d)), 0)) {
p4d_clear_bad(p4d);
return 1;
}
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_none_or_clear_bad(pud_t *pud)
{
if (pud_none(*pud))
return 1;
if (__builtin_expect(!!(pud_bad(*pud)), 0)) {
pud_clear_bad(pud);
return 1;
}
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_none_or_clear_bad(pmd_t *pmd)
{
if (pmd_none(*pmd))
return 1;
if (__builtin_expect(!!(pmd_bad(*pmd)), 0)) {
pmd_clear_bad(pmd);
return 1;
}
return 0;
}
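/*
 * The four *_none_or_clear_bad() helpers above share one pattern per table
 * level: return 1 for a none (empty) entry, and for a corrupt entry report
 * and wipe it via {pgd,p4d,pud,pmd}_clear_bad() before returning 1, so the
 * caller can simply skip that entry.  Only a valid, sane entry returns 0.
 */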
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t __ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
native_set_pte_at(mm, addr, ptep, pte);
}
# 647 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
{
return __ptep_modify_prot_start(mm, addr, ptep);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
# 717 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
return pmd;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_swp_soft_dirty(pmd_t pmd)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
return pmd;
}
# 847 "./include/asm-generic/pgtable.h"
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr,
unsigned long size);
extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size);
extern void untrack_pfn_moved(struct vm_area_struct *vma);
# 869 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_zero_pfn(unsigned long pfn)
{
extern unsigned long zero_pfn;
return pfn == zero_pfn;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long my_zero_pfn(unsigned long addr)
{
extern unsigned long zero_pfn;
return zero_pfn;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_trans_huge(pmd_t pmd)
{
return 0;
}
# 909 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_trans_huge(pud_t pud)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_read_atomic(pmd_t *pmdp)
{
return *pmdp;
}
# 951 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
pmd_t pmdval = pmd_read_atomic(pmd);
# 986 "./include/asm-generic/pgtable.h"
if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
(0 && !pmd_present(pmdval)))
return 1;
if (__builtin_expect(!!(pmd_bad(pmdval)), 0)) {
pmd_clear_bad(pmd);
return 1;
}
return 0;
}
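/*
 * pmd_none_or_trans_huge_or_clear_bad() reads the pmd exactly once through
 * pmd_read_atomic(), so the decision is made on a single snapshot even if
 * the entry is being modified concurrently.  In this configuration THP is
 * off: pmd_trans_huge() is the stub returning 0 further up, and the "0 &&"
 * clause is likely IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) folded to 0, so
 * the helper degenerates to pmd_none_or_clear_bad() on the local copy.
 */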
# 1009 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_trans_unstable(pmd_t *pmd)
{
return 0;
}
# 1027 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_protnone(pte_t pte)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_protnone(pmd_t pmd)
{
return 0;
}
# 1046 "./include/asm-generic/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_clear_huge(p4d_t *p4d)
{
return 0;
}
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
# 1121 "./include/asm-generic/pgtable.h"
struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot);
# 1441 "./arch/x86/include/asm/pgtable.h" 2
# 99 "./include/linux/mm.h" 2
# 152 "./include/linux/mm.h"
extern int sysctl_max_map_count;
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern int overcommit_ratio_handler(struct ctl_table *, int, void *,
size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void *,
size_t *, loff_t *);
# 185 "./include/linux/mm.h"
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);
# 337 "./include/linux/mm.h"
extern pgprot_t protection_map[16];
# 370 "./include/linux/mm.h"
struct vm_fault {
struct vm_area_struct *vma;
unsigned int flags;
gfp_t gfp_mask;
unsigned long pgoff;
unsigned long address;
pmd_t *pmd;
pud_t *pud;
pte_t orig_pte;
struct page *cow_page;
struct mem_cgroup *memcg;
struct page *page;
pte_t *pte;
spinlock_t *ptl;
pgtable_t prealloc_pte;
};
enum page_entry_size {
PE_SIZE_PTE = 0,
PE_SIZE_PMD,
PE_SIZE_PUD,
};
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*split)(struct vm_area_struct * area, unsigned long addr);
int (*mremap)(struct vm_area_struct * area);
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf,
enum page_entry_size pe_size);
void (*map_pages)(struct vm_fault *vmf,
unsigned long start_pgoff, unsigned long end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
const char *(*name)(struct vm_area_struct *vma);
# 458 "./include/linux/mm.h"
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
# 470 "./include/linux/mm.h"
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
struct page *(*find_special_page)(struct vm_area_struct *vma,
unsigned long addr);
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
static const struct vm_operations_struct dummy_vm_ops = {};
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_ops = &dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vma_set_anonymous(struct vm_area_struct *vma)
{
vma->vm_ops = ((void *)0);
}
struct mmu_gather;
struct inode;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_devmap(pmd_t pmd)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_devmap(pud_t pud)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_devmap(pgd_t pgd)
{
return 0;
}
# 1 "./include/linux/huge_mm.h" 1
# 1 "./include/linux/sched/coredump.h" 1
# 17 "./include/linux/sched/coredump.h"
extern void set_dumpable(struct mm_struct *mm, int value);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __get_dumpable(unsigned long mm_flags)
{
return mm_flags & ((1 << 2) - 1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_dumpable(struct mm_struct *mm)
{
return __get_dumpable(mm->flags);
}
# 6 "./include/linux/huge_mm.h" 2
# 1 "./include/linux/fs.h" 1
# 1 "./include/linux/wait_bit.h" 1
# 10 "./include/linux/wait_bit.h"
struct wait_bit_key {
void *flags;
int bit_nr;
unsigned long timeout;
};
struct wait_bit_queue_entry {
struct wait_bit_key key;
struct wait_queue_entry wq_entry;
};
typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
void wake_up_bit(void *word, int bit);
int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
struct wait_queue_head *bit_waitqueue(void *word, int bit);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) wait_bit_init(void);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
# 49 "./include/linux/wait_bit.h"
extern int bit_wait(struct wait_bit_key *key, int mode);
extern int bit_wait_io(struct wait_bit_key *key, int mode);
extern int bit_wait_timeout(struct wait_bit_key *key, int mode);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode);
# 70 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
do { _cond_resched(); } while (0);
if (!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait,
mode);
}
# 95 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
do { _cond_resched(); } while (0);
if (!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait_io,
mode);
}
# 121 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
unsigned long timeout)
{
do { _cond_resched(); } while (0);
if (!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))
return 0;
return out_of_line_wait_on_bit_timeout(word, bit,
bit_wait_timeout,
mode, timeout);
}
# 149 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
unsigned mode)
{
do { _cond_resched(); } while (0);
if (!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
}
# 178 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
do { _cond_resched(); } while (0);
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}
# 202 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
do { _cond_resched(); } while (0);
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}
# 228 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
unsigned mode)
{
do { _cond_resched(); } while (0);
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);
# 317 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_and_wake_up_bit(int bit, void *word)
{
clear_bit_unlock(bit, word);
__asm__ __volatile__("" : : : "memory");
wake_up_bit(word, bit);
}
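/*
 * In clear_and_wake_up_bit() above, the bare asm volatile("" ::: "memory")
 * between clear_bit_unlock() and wake_up_bit() is most likely the x86
 * expansion of smp_mb__after_atomic(): locked bit operations already order
 * memory on x86, so only a compiler barrier is needed before checking for
 * waiters.
 */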
# 7 "./include/linux/fs.h" 2
# 1 "./include/linux/kdev_t.h" 1
# 1 "./include/uapi/linux/kdev_t.h" 1
# 6 "./include/linux/kdev_t.h" 2
# 24 "./include/linux/kdev_t.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool old_valid_dev(dev_t dev)
{
return ((unsigned int) ((dev) >> 20)) < 256 && ((unsigned int) ((dev) & ((1U << 20) - 1))) < 256;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 old_encode_dev(dev_t dev)
{
return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1)));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dev_t old_decode_dev(u16 val)
{
return ((((val >> 8) & 255) << 20) | (val & 255));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 new_encode_dev(dev_t dev)
{
unsigned major = ((unsigned int) ((dev) >> 20));
unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1)));
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return (((major) << 20) | (minor));
}
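/*
 * dev_t layout used above: the major number sits in bits 20..31 and the
 * minor in bits 0..19 (MAJOR(dev) = dev >> 20, MINOR(dev) = dev & 0xFFFFF).
 * new_encode_dev() repacks that into the 32-bit user-visible format: minor
 * bits 0..7 first, then 12 bits of major, then the remaining minor bits.
 * Worked example: major 8, minor 0x101 encodes to
 *   (0x01) | (8 << 8) | (0x100 << 12) = 0x100801,
 * and new_decode_dev(0x100801) recovers major 8, minor 0x101.
 */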
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sysv_valid_dev(dev_t dev)
{
return ((unsigned int) ((dev) >> 20)) < (1<<14) && ((unsigned int) ((dev) & ((1U << 20) - 1))) < (1<<18);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 sysv_encode_dev(dev_t dev)
{
return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
# 8 "./include/linux/fs.h" 2
# 1 "./include/linux/dcache.h" 1
# 1 "./include/linux/rculist_bl.h" 1
# 1 "./include/linux/list_bl.h" 1
# 34 "./include/linux/list_bl.h"
struct hlist_bl_head {
struct hlist_bl_node *first;
};
struct hlist_bl_node {
struct hlist_bl_node *next, **pprev;
};
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
h->next = ((void *)0);
h->pprev = ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hlist_bl_unhashed(const struct hlist_bl_node *h)
{
return !h->pprev;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
return (struct hlist_bl_node *)
((unsigned long)h->first & ~1UL);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_set_first(struct hlist_bl_head *h,
struct hlist_bl_node *n)
{
;
;
h->first = (struct hlist_bl_node *)((unsigned long)n | 1UL);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hlist_bl_empty(const struct hlist_bl_head *h)
{
return !((unsigned long)({ union { typeof(h->first) __val; char __c[1]; } __u; if (1) __read_once_size(&(h->first), __u.__c, sizeof(h->first)); else __read_once_size_nocheck(&(h->first), __u.__c, sizeof(h->first)); do { } while (0); __u.__val; }) & ~1UL);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_add_head(struct hlist_bl_node *n,
struct hlist_bl_head *h)
{
struct hlist_bl_node *first = hlist_bl_first(h);
n->next = first;
if (first)
first->pprev = &n->next;
n->pprev = &h->first;
hlist_bl_set_first(h, n);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __hlist_bl_del(struct hlist_bl_node *n)
{
struct hlist_bl_node *next = n->next;
struct hlist_bl_node **pprev = n->pprev;
;
({ union { typeof(*pprev) __val; char __c[1]; } __u = { .__val = ( typeof(*pprev)) ((struct hlist_bl_node *) ((unsigned long)next | ((unsigned long)*pprev & 1UL))) }; __write_once_size(&(*pprev), __u.__c, sizeof(*pprev)); __u.__val; });
if (next)
next->pprev = pprev;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_del(struct hlist_bl_node *n)
{
__hlist_bl_del(n);
n->next = ((void *) 0x100 + (0xdead000000000000UL));
n->pprev = ((void *) 0x200 + (0xdead000000000000UL));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_del_init(struct hlist_bl_node *n)
{
if (!hlist_bl_unhashed(n)) {
__hlist_bl_del(n);
INIT_HLIST_BL_NODE(n);
}
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_lock(struct hlist_bl_head *b)
{
bit_spin_lock(0, (unsigned long *)b);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_unlock(struct hlist_bl_head *b)
{
__bit_spin_unlock(0, (unsigned long *)b);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hlist_bl_is_locked(struct hlist_bl_head *b)
{
return bit_spin_is_locked(0, (unsigned long *)b);
}
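/*
 * hlist_bl is a hash-list head whose bit 0 doubles as a bit spinlock, which
 * is why hlist_bl_first() masks the pointer with ~1UL, hlist_bl_set_first()
 * ORs the bit back in, and hlist_bl_lock() is bit_spin_lock(0, ...) on the
 * head word.  The list must only be modified with that lock bit held.
 */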
# 9 "./include/linux/rculist_bl.h" 2
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
struct hlist_bl_node *n)
{
;
;
({ uintptr_t _r_a_p__v = (uintptr_t)((struct hlist_bl_node *)((unsigned long)n | 1UL)); if (__builtin_constant_p((struct hlist_bl_node *)((unsigned long)n | 1UL)) && (_r_a_p__v) == (uintptr_t)((void *)0)) ({ union { typeof((h->first)) __val; char __c[1]; } __u = { .__val = ( typeof((h->first))) ((typeof(h->first))(_r_a_p__v)) }; __write_once_size(&((h->first)), __u.__c, sizeof((h->first))); __u.__val; }); else do { do { extern void __compiletime_assert_18(void) ; if (!((sizeof(*&h->first) == sizeof(char) || sizeof(*&h->first) == sizeof(short) || sizeof(*&h->first) == sizeof(int) || sizeof(*&h->first) == sizeof(long)))) __compiletime_assert_18(); } while (0); __asm__ __volatile__("" : : : "memory"); ({ union { typeof(*&h->first) __val; char __c[1]; } __u = { .__val = ( typeof(*&h->first)) ((typeof(*((typeof(h->first))_r_a_p__v)) *)((typeof(h->first))_r_a_p__v)) }; __write_once_size(&(*&h->first), __u.__c, sizeof(*&h->first)); __u.__val; }); } while (0); _r_a_p__v; });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
return (struct hlist_bl_node *)
((unsigned long)({ typeof(*(h->first)) *________p1 = (typeof(*(h->first)) *)({ union { typeof((h->first)) __val; char __c[1]; } __u; if (1) __read_once_size(&((h->first)), __u.__c, sizeof((h->first))); else __read_once_size_nocheck(&((h->first)), __u.__c, sizeof((h->first))); do { } while (0); __u.__val; }); do { } while (0); ; ((typeof(*(h->first)) *)(________p1)); }) & ~1UL);
}
# 47 "./include/linux/rculist_bl.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
{
if (!hlist_bl_unhashed(n)) {
__hlist_bl_del(n);
n->pprev = ((void *)0);
}
}
# 74 "./include/linux/rculist_bl.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
__hlist_bl_del(n);
n->pprev = ((void *) 0x200 + (0xdead000000000000UL));
}
# 99 "./include/linux/rculist_bl.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
struct hlist_bl_head *h)
{
struct hlist_bl_node *first;
first = hlist_bl_first(h);
n->next = first;
if (first)
first->pprev = &n->next;
n->pprev = &h->first;
hlist_bl_set_first_rcu(h, n);
}
# 9 "./include/linux/dcache.h" 2
# 1 "./include/linux/lockref.h" 1
# 25 "./include/linux/lockref.h"
struct lockref {
union {
__u64 __attribute__((aligned(8))) lock_count;
struct {
spinlock_t lock;
int count;
};
};
};
extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_put_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __lockref_is_dead(const struct lockref *l)
{
return ((int)l->count < 0);
}
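/*
 * struct lockref overlays the spinlock and the reference count in a single
 * 8-byte-aligned word (lock_count) so that, where the architecture supports
 * it, get/put can bump the count with a 64-bit cmpxchg while the lock is
 * observed to be free, falling back to taking the spinlock otherwise.
 */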
# 14 "./include/linux/dcache.h" 2
# 1 "./include/linux/stringhash.h" 1
# 1 "./include/linux/hash.h" 1
# 60 "./include/linux/hash.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __hash_32_generic(u32 val)
{
return val * 0x61C88647;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 hash_32_generic(u32 val, unsigned int bits)
{
return __hash_32_generic(val) >> (32 - bits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u32 hash_64_generic(u64 val, unsigned int bits)
{
return val * 0x61C8864680B583EBull >> (64 - bits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 hash_ptr(const void *ptr, unsigned int bits)
{
return hash_64_generic((unsigned long)ptr, bits);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 hash32_ptr(const void *ptr)
{
unsigned long val = (unsigned long)ptr;
val ^= (val >> 32);
return (u32)val;
}
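/*
 * The hash helpers above are multiplicative (Fibonacci) hashing: multiply by
 * a large odd, golden-ratio-derived constant (0x61C88647 for 32-bit values,
 * 0x61C8864680B583EB for 64-bit) and keep the top `bits` bits of the
 * product, since multiplication mixes entropy toward the high end.
 */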
# 8 "./include/linux/stringhash.h" 2
# 42 "./include/linux/stringhash.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
return (prevhash + (c << 4) + (c >> 4)) * 11;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int end_name_hash(unsigned long hash)
{
return hash_64_generic(hash, 32);
}
# 66 "./include/linux/stringhash.h"
extern unsigned int __attribute__((__pure__)) full_name_hash(const void *salt, const char *, unsigned int);
# 77 "./include/linux/stringhash.h"
extern u64 __attribute__((__pure__)) hashlen_string(const void *salt, const char *name);
# 15 "./include/linux/dcache.h" 2
struct path;
struct vfsmount;
# 47 "./include/linux/dcache.h"
struct qstr {
union {
struct {
u32 hash; u32 len;
};
u64 hash_len;
};
const unsigned char *name;
};
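/*
 * In struct qstr the anonymous union overlays { hash, len } with the 64-bit
 * hash_len, so on little-endian x86-64 the name hash occupies the low 32
 * bits and the length the high 32 bits.  hashlen_string(), declared above,
 * returns this combined value, letting the dcache handle hash and length of
 * a name in a single 64-bit operation.
 */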
extern const struct qstr empty_name;
extern const struct qstr slash_name;
struct dentry_stat_t {
long nr_dentry;
long nr_unused;
long age_limit;
long want_pages;
long dummy[2];
};
extern struct dentry_stat_t dentry_stat;
# 88 "./include/linux/dcache.h"
struct dentry {
unsigned int d_flags;
seqcount_t d_seq;
struct hlist_bl_node d_hash;
struct dentry *d_parent;
struct qstr d_name;
struct inode *d_inode;
unsigned char d_iname[32];
struct lockref d_lockref;
const struct dentry_operations *d_op;
struct super_block *d_sb;
unsigned long d_time;
void *d_fsdata;
union {
struct list_head d_lru;
wait_queue_head_t *d_wait;
};
struct list_head d_child;
struct list_head d_subdirs;
union {
struct hlist_node d_alias;
struct hlist_bl_node d_in_lookup_hash;
struct callback_head d_rcu;
} d_u;
} ;
enum dentry_d_lock_class
{
DENTRY_D_LOCK_NORMAL,
DENTRY_D_LOCK_NESTED
};
struct dentry_operations {
int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
unsigned int, const char *, const struct qstr *);
int (*d_delete)(const struct dentry *);
int (*d_init)(struct dentry *);
void (*d_release)(struct dentry *);
void (*d_prune)(struct dentry *);
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, const struct inode *);
} __attribute__((__aligned__((1 << (6)))));
# 220 "./include/linux/dcache.h"
extern seqlock_t rename_lock;
extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
extern void d_invalidate(struct dentry *);
extern struct dentry * d_make_root(struct inode *);
extern void d_genocide(struct dentry *);
extern void d_tmpfile(struct dentry *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
extern int path_has_submounts(const struct path *);
extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
extern struct dentry *d_lookup