-
Aosp10/art/runtime/jni/jni_internal.cc
static jint RegisterNatives(JNIEnv* env, jclass java_class, const JNINativeMethod* methods, jint method_count) { if (UNLIKELY(method_count < 0)) { JavaVmExtFromEnv(env)->JniAbortF("RegisterNatives", "negative method count: %d", method_count); return JNI_ERR; // Not reached except in unit tests. } CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", java_class, JNI_ERR); ScopedObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> c = hs.NewHandle(soa.Decode<mirror::Class>(java_class)); if (UNLIKELY(method_count == 0)) { LOG(WARNING) << "JNI RegisterNativeMethods: attempt to register 0 native methods for " << c->PrettyDescriptor(); return JNI_OK; } CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", methods, JNI_ERR); for (jint i = 0; i < method_count; ++i) { const char* name = methods[i].name; const char* sig = methods[i].signature; const void* fnPtr = methods[i].fnPtr;
After modification (adds a LOG(WARNING) line for every registered native method):
static jint RegisterNatives(JNIEnv* env, jclass java_class, const JNINativeMethod* methods, jint method_count) { if (UNLIKELY(method_count < 0)) { JavaVmExtFromEnv(env)->JniAbortF("RegisterNatives", "negative method count: %d", method_count); return JNI_ERR; // Not reached except in unit tests. } CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", java_class, JNI_ERR); ScopedObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> c = hs.NewHandle(soa.Decode<mirror::Class>(java_class)); if (UNLIKELY(method_count == 0)) { LOG(WARNING) << "JNI RegisterNativeMethods: attempt to register 0 native methods for " << c->PrettyDescriptor(); return JNI_OK; } CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", methods, JNI_ERR); for (jint i = 0; i < method_count; ++i) { const char* name = methods[i].name; const char* sig = methods[i].signature; const void* fnPtr = methods[i].fnPtr; LOG(WARNING) << "JNI RegisterNativeMethods: " << c->PrettyDescriptor() << " name:" << name << " sig:" << sig << " fnPtr:" << fnPtr;
fs/proc/task_mmu.c
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
struct mm_struct *mm = vma->vm_mm;
struct file *file = vma->vm_file;
vm_flags_t flags = vma->vm_flags;
unsigned long ino = 0;
unsigned long long pgoff = 0;
unsigned long start, end;
dev_t dev = 0;
const char *name = NULL;
char path_buf[PATH_MAX] = {0};
//star add
if (file) {
char *path = d_path(&file->f_path, path_buf, sizeof(path_buf));
if(strlen(path) > 0) {
if(strstr(path, "local/")) {
printk("[Hide Maps] %s\n", path);
return;
}
}
}
//end add
system/core/init/selinux.cpp
// Whether SELinux should run in enforcing mode.
//
// Intentionally patched to always report "not enforcing" so init keeps
// SELinux permissive.  The statements that previously followed the forced
// return were unreachable dead code; they are preserved here as a comment
// so the stock behavior can be restored:
//
//   if (ALLOW_PERMISSIVE_SELINUX) {
//       return StatusFromCmdline() == SELINUX_ENFORCING;
//   }
//   return true;
bool IsEnforcing() {
    return false;
}
/*
 * Return the real uid of @task.
 * Takes and drops a reference on the task's credentials, so the value is
 * safe to read without holding any lock on the task.
 */
static inline uid_t get_task_uid(struct task_struct *task)
{
	const struct cred *task_cred = get_task_cred(task);
	uid_t task_uid = task_cred->uid.val;

	put_cred(task_cred);
	return task_uid;
}
/*
 * Anti-anti-debug shim around sys_ptrace.
 *
 * Unlike upstream, the target task is looked up FIRST so that both the
 * caller's and callee's uids can be inspected.  For two untrusted app
 * processes (uid >= 10000, i.e. Android AID_APP and above):
 *   - PTRACE_TRACEME is silently faked as successful without actually
 *     tracing, defeating self-ptrace debugger-detection tricks;
 *   - PTRACE_ATTACH between processes of the same uid is faked as
 *     successful as well.
 *
 * Bug fixed relative to the previous version: every path that returned
 * after a successful ptrace_get_task_struct() must drop the task
 * reference.  The early "return 0" bypasses and the relocated
 * PTRACE_TRACEME path leaked it; they now go through
 * out_put_task_struct.
 */
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;
	int caller_uid;
	int callee_uid;

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		/*
		 * No such task.  PTRACE_TRACEME acts on current and needs no
		 * target, so it is still serviced; everything else fails.
		 */
		if (request == PTRACE_TRACEME) {
			ret = ptrace_traceme();
			if (!ret)
				arch_ptrace_attach(current);
		} else {
			ret = PTR_ERR(child);
		}
		goto out;
	}

	if (request == PTRACE_TRACEME || request == PTRACE_ATTACH) {
		caller_uid = get_task_uid(current);
		callee_uid = get_task_uid(child);
		printk(KERN_INFO "Harpe: Caller name is %s, caller_pid is %d, caller_uid is %d, Callee name is %s, callee_pid is %d, callee_uid is %d\n", current->comm, task_pid_nr(current), caller_uid, child->comm, task_pid_nr(child), callee_uid);
		/* Only interfere when both ends are untrusted app uids. */
		if (caller_uid >= 10000 && callee_uid >= 10000) {
			if (request == PTRACE_TRACEME) {
				printk(KERN_INFO "Harpe: PASS PTRACE_TRACEME, Caller name is %s, Callee name is %s\n", current->comm, child->comm);
				/* Fake success; drop the task reference
				 * (previously leaked by "return 0"). */
				ret = 0;
				goto out_put_task_struct;
			}
			/* Same-uid attach is also faked as successful. */
			if (request == PTRACE_ATTACH && caller_uid == callee_uid) {
				printk(KERN_INFO "Harpe: PASS PTRACE_ATTACH, Caller name is %s, Callee name is %s\n", current->comm, child->comm);
				ret = 0;
				goto out_put_task_struct;
			}
		}
	}

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		/* child is valid here (lookup succeeded above); release it.
		 * The previous version jumped to "out" and leaked it. */
		goto out_put_task_struct;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

out_put_task_struct:
	put_task_struct(child);
out:
	return ret;
}
art/runtime/interpreter/interpreter.cc
static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind; // Force the switch-based interpreter implementation (so TraceExecution runs per instruction)
art/runtime/interpreter/interpreter_common.h
// Emit one trace line for the instruction about to execute: the pretty
// method name, the dex pc, the decoded instruction, and a dump of every
// vreg in the shadow frame (with string contents / object type for
// reference-holding registers).  The line goes to both stderr and the
// Android log (as ERROR so it survives log filtering).
static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
                                  const uint32_t dex_pc)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::ostringstream line;
  line << shadow_frame.GetMethod()->PrettyMethod()
       << android::base::StringPrintf("\t[Address]0x%x: ", dex_pc)
       << inst->DumpString(shadow_frame.GetMethod()->GetDexFile()) << "\t[Regs]";
  const uint32_t num_vregs = shadow_frame.NumberOfVRegs();
  for (uint32_t reg = 0; reg < num_vregs; ++reg) {
    const uint32_t raw = shadow_frame.GetVReg(reg);
    ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(reg);
    line << android::base::StringPrintf(" vreg%u=0x%08X", reg, raw);
    if (obj != nullptr) {
      // Non-null reference: print string contents verbatim, otherwise the type.
      if (obj->GetClass()->IsStringClass() && !obj->AsString()->IsValueNull()) {
        line << "/java.lang.String \"" << obj->AsString()->ToModifiedUtf8() << "\"";
      } else {
        line << "/" << obj->PrettyTypeOf();
      }
    }
  }
  std::cerr << line.str() << "\n";
  LOG(ERROR) << line.str() << "\n";
}
arch/arm64/kernel/stacktrace.c
/*
* Stack tracing support
*
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kasan.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
/*
* AArch64 PCS assigns the frame pointer to x29.
*
* A simple function prologue looks like this:
* sub sp, sp, #0x10
* stp x29, x30, [sp]
* mov x29, sp
*
* A simple function epilogue looks like this:
* mov sp, x29
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
/*
 * Advance *frame one step up the call stack by reading the AAPCS64 frame
 * record {fp, lr} that frame->fp points at.
 * Returns 0 on success, -EINVAL when the frame pointer is misaligned,
 * not on an accessible stack, or the terminal all-NULL record.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
/* Cache the old fp: frame->fp is overwritten below before fp+8 is read. */
unsigned long fp = frame->fp;
/* AAPCS64 requires the frame pointer to be 16-byte aligned. */
if (fp & 0xf)
return -EINVAL;
if (!tsk)
tsk = current;
if (!on_accessible_stack(tsk, fp))
return -EINVAL;
/* The raw stack loads below may trip KASAN; suppress it around them. */
kasan_disable_current();
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
kasan_enable_current();
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
if (WARN_ON_ONCE(frame->graph == -1))
return -EINVAL;
if (frame->graph < -1)
frame->graph += FTRACE_NOTRACE_DEPTH;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
frame->pc = tsk->ret_stack[frame->graph--].ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
* Frames created upon entry from EL0 have NULL FP and PC values, so
* don't bother reporting these. Frames created by __noreturn functions
* might have a valid FP even if PC is bogus, so only terminate where
* both are NULL.
*/
if (!frame->fp && !frame->pc)
return -EINVAL;
return 0;
}
/*
 * Walk the call stack starting at *frame, invoking @fn on each frame with
 * @data.  Stops when @fn returns non-zero or when unwinding fails.
 */
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	for (;;) {
		if (fn(frame, data))
			break;
		if (unwind_frame(tsk, frame) < 0)
			break;
	}
}
#ifdef CONFIG_STACKTRACE
/* Cookie threaded from the save_stack_trace_* entry points into save_trace(). */
struct stack_trace_data {
struct stack_trace *trace;	/* output buffer being filled */
unsigned int no_sched_functions;	/* when set, drop scheduler-internal frames */
unsigned int skip;	/* number of top frames still to discard */
};
/*
 * Per-frame callback for walk_stackframe(): record frame->pc into the
 * trace buffer, honoring the skip count and the no-sched filter.
 * Returns non-zero (stop walking) once the buffer is full.
 */
static int save_trace(struct stackframe *frame, void *cookie)
{
	struct stack_trace_data *ctx = cookie;
	struct stack_trace *trace = ctx->trace;
	unsigned long pc = frame->pc;

	if (ctx->no_sched_functions && in_sched_functions(pc))
		return 0;
	if (ctx->skip) {
		ctx->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = pc;

	return trace->nr_entries >= trace->max_entries;
}
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
struct stack_trace_data data;
struct stackframe frame;
data.trace = trace;
data.skip = trace->skip;
data.no_sched_functions = 0;
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = current->curr_ret_stack;
#endif
walk_stackframe(current, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/*
 * Common worker for save_stack_trace{,_tsk}: walk @tsk's kernel stack and
 * record the pcs into @trace, optionally filtering scheduler internals.
 * Holds a stack reference across the walk so the stack cannot vanish.
 */
static noinline void __save_stack_trace(struct task_struct *tsk,
struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;
if (!try_get_task_stack(tsk))
return;
data.trace = trace;
data.skip = trace->skip;
data.no_sched_functions = nosched;
if (tsk != current) {
/* Sleeping task: start from its scheduler-saved fp/pc. */
frame.fp = thread_saved_fp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
/* We don't want this function nor the caller */
data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.pc = (unsigned long)__save_stack_trace;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = tsk->curr_ret_stack;
#endif
walk_stackframe(tsk, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
put_task_stack(tsk);
}
/* Capture @tsk's kernel stack trace, skipping scheduler-internal frames. */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
/*
 * EXPORT_SYMBOL_GPL references the symbol it exports and must follow the
 * definition; it previously appeared before the function, which breaks
 * the build.
 */
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
/* Capture the current task's kernel stack trace (scheduler frames included). */
void save_stack_trace(struct stack_trace *trace)
{
__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
//star add
#include <linux/uaccess.h>
/* AAPCS64 frame record as laid out on the user stack: {caller's fp, saved lr}. */
struct stack_frame_user {
const void __user *next_fp;	/* fp of the caller's frame record */
unsigned long lr;	/* return address saved in this frame */
};
/*
 * Safely fetch one user-mode frame record at @fp into *frame.
 * Returns 1 on success, 0 if the address is invalid or the copy faults.
 * Runs with pagefaults disabled so it may be called from atomic context.
 */
static int
copy_stack_frame_user(const void __user *fp, struct stack_frame_user *frame)
{
	int ok;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	pagefault_disable();
	ok = __copy_from_user_inatomic(frame, fp, sizeof(*frame)) ? 0 : 1;
	pagefault_enable();

	return ok;
}
/*
 * Walk the current task's USER stack by following AAPCS64 frame records
 * from x29, recording the interrupted pc and each saved lr into @trace.
 *
 * Bug fixed: a crafted or corrupt user stack whose frame record has
 * lr == 0 and next_fp pointing at (or below) itself never advanced
 * nr_entries, so the while loop spun forever in the kernel.  Frame
 * records must move strictly upward on the downward-growing stack; any
 * non-increasing next_fp now terminates the walk.
 */
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->regs[29];

	/* The interrupted pc is always the first entry. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->pc;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.lr = 0;

		if (!copy_stack_frame_user(fp, &frame))
			break;
		/* Frame records cannot live below the current sp. */
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.lr) {
			trace->entries[trace->nr_entries++] = frame.lr;
		}
		/* Guard against cycles / non-monotonic frame chains. */
		if ((unsigned long)frame.next_fp <= (unsigned long)fp)
			break;
		fp = frame.next_fp;
	}
}
/*
 * Public entry point: capture the current task's user-space stack trace.
 * No-op for kernel threads (no mm, hence no user stack).  Terminates the
 * trace with ULONG_MAX if there is room.
 */
void save_stack_trace_users(struct stack_trace *trace)
{
	if (!current->mm)
		return;

	__save_stack_trace_user(trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_users);
//end add
include/linux/stacktrace.h
//star add
extern void save_stack_trace_users(struct stack_trace *trace);
//end add
extern void save_stack_trace(struct stack_trace *trace);
#else /* !CONFIG_STACKTRACE */
//star add
# define save_stack_trace_users(trace) do { } while (0)
//end add
# define save_stack_trace(trace) do { } while (0)
arch/arm64/configs/floral_defconfig
CONFIG_USER_STACKTRACE_SUPPORT=y
CONFIG_STACKTRACE=y
CONFIG_HAVE_DEBUG_BUGVERBOSE=y