Skip to content

Instantly share code, notes, and snippets.

@srikanth007m
Created October 27, 2022 11:01
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save srikanth007m/f6afb41a902f062e7b3516c57e91ab4b to your computer and use it in GitHub Desktop.
Save srikanth007m/f6afb41a902f062e7b3516c57e91ab4b to your computer and use it in GitHub Desktop.
Double free of p->user_cpus_ptr for 32-bit tasks: sched_setaffinity() frees a user_cpus_ptr mask that has already been freed.
Dmesg log / call stack: the object being freed (user_mask) is already on the slab page's freelist, so __slab_free() detects the double free and triggers the BUG at mm/slub.c:363. Source code: user_mask is obtained from p->user_cpus_ptr via clear_user_cpus_ptr() and then freed again by kfree() in __set_cpus_allowed_ptr_locked().
[71053.256528][ T535] ------------[ cut here ]------------
[71053.256537][ T535] kernel BUG at mm/slub.c:363!
[71053.261534][ T535] Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
[71053.268068][ T535] Skip md ftrace buffer dump for: 0xa00f50
[71053.834135][ T535] CPU: 5 PID: 535 Comm: default_npth_th Tainted: G S W OE 5.15.41 #1
[71053.843167][ T535] Hardware name: Qualcomm Technologies, Inc. 8550 socrates (DT)
[71053.850855][ T535] pstate: 62400005 (nZCv daif +PAN -UAO +TCO -DIT -SSBS BTYPE=--)
[71053.858726][ T535] pc : __slab_free+0x230/0x28c
[71053.863488][ T535] lr : kfree+0x220/0x2cc
[71053.867710][ T535] sp : ffffffc0303abbf0
[71053.871835][ T535] x29: ffffffc0303abbf0 x28: ffffff88eaae3800 x27: 0000000000000000
[71053.879883][ T535] x26: ffffff88e9dbe580 x25: 0000000000000000 x24: ffffff88e9dbe580
[71053.887936][ T535] x23: 0000000000000001 x22: ffffff88e9dbe580 x21: 0000000000000000
[71053.895978][ T535] x20: ffffff80011d2300 x19: fffffffe23a76f80 x18: ffffffc01e641058
[71053.901418][ T786] type=1400 audit(1663025082.419:1157866): avc: denied { getopt } for comm="FinalizerDaemon" path="/dev/socket/usap_pool_primary" scontext=u:r:platform_app:s0:c512,c768 tcontext=u:r:zygote:s0 tclass=unix_stream_socket permissive=0 app=com.miui.accessibility
[71053.904022][ T535] x17: eaae3c38ffffff88 x16: 0000000000000000 x15: 00000000b31aa44d
[71053.904027][ T535] x14: 0000000026a20879 x13: 0000000050575121 x12: 5b80005b111e83ac
[71053.904031][ T535] x11: 8167a90b1b0beb00 x10: fffffffe226b8680 x9 : 00001fffffffee1a
[71053.904036][ T535] x8 : ffffff897909e000 x7 : 0000000000000000 x6 : ffffffdb0c12b61c
[71053.904040][ T535] x5 : ffffffdb111e8004 x4 : 0000000000000001 x3 : ffffff88e9dbe580
[71053.904044][ T535] x2 : ffffff88e9dbe580 x1 : fffffffe23a76f80 x0 : ffffff80011d2300
[71053.904049][ T535] Call trace:
[71053.904050][ T535] __slab_free+0x230/0x28c
[71053.904054][ T535] kfree+0x220/0x2cc
[71053.904057][ T535] __set_cpus_allowed_ptr_locked+0x18c/0x238
[71053.904063][ T535] __sched_setaffinity+0x10c/0x150
[71053.904070][ T535] sched_setaffinity+0x194/0x2b0
[71053.904073][ T535] __arm64_compat_sys_sched_setaffinity+0x58/0x8c
[71053.904079][ T535] invoke_syscall+0x60/0x150
[71053.904083][ T535] el0_svc_common.llvm.226235472735490302+0x98/0x114
[71053.904086][ T535] do_el0_svc_compat+0x20/0x30
[71053.904088][ T535] el0_svc_compat+0x28/0x90
[71053.904094][ T535] el0t_32_sync_handler+0x7c/0xbc
[71053.904097][ T535] el0t_32_sync+0x1b8/0x1bc
[71053.904103][ T535] Code: aa1403e0 aa1303e1 940002bf 17ffffcc (d4210000) -013 |set_freepointer(inline)
| s = 0xFFFFFF80011D2300
| object = 0xFFFFFF88E9DBE580
| fp = 0xFFFFFF88E9DBE580
-013 |__slab_free(s = 0xFFFFFF80011D2300, page = 0xFFFFFFFE23A76F80, head = 0xFFFFFF88E9DBE580, tail = 0xF
| s = 0xFFFFFF80011D2300
| page = 0xFFFFFFFE23A76F80
| head = 0xFFFFFF88E9DBE580
| tail = 0xFFFFFF88E9DBE580
| cnt = 1
| flags = 0
| n = 0x0
| prior = 0xFFFFFF88E9DBE580
-014 |slab_free(inline)
-014 |kfree(x = ?)
-015 |__set_cpus_allowed_ptr_locked(p = 0xFFFFFF88EAAE3800, new_mask = ?, flags = ?, rq = 0xFFFFFF8971196A
| p = 0xFFFFFF88EAAE3800
| flags = ?
| rq = 0xFFFFFF8971196A40
| rf = 0xFFFFFFC0303ABD38
| cpu_allowed_mask = 0xFFFFFFDB13D6D8F8
| ret = 0
-016 |__sched_setaffinity(p = 0xFFFFFF88EAAE3800, mask = ?)
| p = 0xFFFFFF88EAAE3800
| mask = ?
| cpus_allowed = ((bits = (103)))
| new_mask = ((bits = (103)))
| retval = ???
-017 |sched_setaffinity(pid = ?, in_mask = 0xFFFFFFC0303ABDD0)
| pid = ?
| in_mask = 0xFFFFFFC0303ABDD0
| retval = ???
| p = 0xFFFFFF88EAAE3800
-018 |__se_compat_sys_sched_setaffinity(inline)
-018 |__arm64_compat_sys_sched_setaffinity(regs = ?)
| regs = ?
-019 |__invoke_syscall(inline)
| regs = 0xFFFFFFC0303ABEB0
-019 |invoke_syscall(regs = 0xFFFFFFC0303ABEB0, scno = ?, sc_nr = ?, syscall_table = ?)
| regs = 0xFFFFFFC0303ABEB0
| ret = ???
-020 |el0_svc_common.llvm.226235472735490302(regs = 0xFFFFFFC0303ABEB0, scno = ?, :sc_nr = 449, syscall_ta
| regs = 0xFFFFFFC0303ABEB0
| sc_nr = 449
| syscall_table = 0xFFFFFFDB1301AF08
| flags = 0
-021 |do_el0_svc_compat(regs = ?)
| regs = ?
-022 |local_daif_mask(inline)
-022 |prepare_exit_to_user_mode(inline)
| regs = 0xFFFFFFC0303ABEB0
| flags = 0
-022 |exit_to_user_mode(inline)
| regs = 0xFFFFFFC0303ABEB0
-022 |el0_svc_compat(regs = 0xFFFFFFC0303ABEB0)
| regs = 0xFFFFFFC0303ABEB0
-023 |el0t_32_sync_handler(regs = ?)
| regs = ?
-024 |el0t_32_sync(asm)
--> |exception
-025 |NUR:0x1AC::0xED3063F8(asm)
/*
 * NOTE(review): kernel/sched/core.c excerpt (v5.15) as pasted in the bug
 * report; the leading "28xx" numbers are the kernel source line numbers.
 * Per the crash trace above, the kfree() near the bottom of this function
 * (source line 2893) is the free that trips "kernel BUG at mm/slub.c:363"
 * via __slab_free() — i.e. the object it frees is already on the slab
 * freelist (double free / use-after-free of p->user_cpus_ptr).
 */
-026 |NUT:0x1AC::0xD38A28DA(asm) 2815 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
2816 const struct cpumask *new_mask,
2817 u32 flags,
2818 struct rq *rq,
2819 struct rq_flags *rf)
/* Caller enters with rq->lock and p->pi_lock held; both are released on exit. */
2820 __releases(rq->lock)
2821 __releases(p->pi_lock)
2822 {
2823 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
2824 const struct cpumask *cpu_valid_mask = cpu_active_mask;
2825 bool kthread = p->flags & PF_KTHREAD;
/* user_mask stays NULL unless SCA_USER detaches p->user_cpus_ptr below. */
2826 struct cpumask *user_mask = NULL;
2827 unsigned int dest_cpu;
2828 int ret = 0;
2829
2830 update_rq_clock(rq);
2831
2832 if (kthread || is_migration_disabled(p)) {
2833 /*
2834 * Kernel threads are allowed on online && !active CPUs,
2835 * however, during cpu-hot-unplug, even these might get pushed
2836 * away if not KTHREAD_IS_PER_CPU.
2837 *
2838 * Specifically, migration_disabled() tasks must not fail the
2839 * cpumask_any_and_distribute() pick below, esp. so on
2840 * SCA_MIGRATE_ENABLE, otherwise we'll not call
2841 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
2842 */
2843 cpu_valid_mask = cpu_online_mask;
2844 }
2845
/* User tasks may only be placed on CPUs the task could possibly run on. */
2846 if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
2847 ret = -EINVAL;
2848 goto out;
2849 }
2850
2851 /*
2852 * Must re-check here, to close a race against __kthread_bind(),
2853 * sched_setaffinity() is not guaranteed to observe the flag.
2854 */
2855 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
2856 ret = -EINVAL;
2857 goto out;
2858 }
2859
2860 if (!(flags & SCA_MIGRATE_ENABLE)) {
/* No-op if the affinity is unchanged. */
2861 if (cpumask_equal(&p->cpus_mask, new_mask))
2862 goto out;
2863
2864 if (WARN_ON_ONCE(p == current &&
2865 is_migration_disabled(p) &&
2866 !cpumask_test_cpu(task_cpu(p), new_mask))) {
2867 ret = -EBUSY;
2868 goto out;
2869 }
2870 }
2871
2872 /*
2873 * Picking a ~random cpu helps in cases where we are changing affinity
2874 * for groups of tasks (ie. cpuset), so that load balancing is not
2875 * immediately required to distribute the tasks within their new mask.
2876 */
2877 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
/* Android vendor hooks may override dest_cpu. */
2878 trace_android_rvh_set_cpus_allowed_ptr_locked(cpu_valid_mask, new_mask, &dest_cpu);
2879 trace_android_rvh_set_cpus_allowed_by_task(cpu_valid_mask, new_mask, p, &dest_cpu);
2880
2881 if (dest_cpu >= nr_cpu_ids) {
2882 ret = -EINVAL;
2883 goto out;
2884 }
2885
2886 __do_set_cpus_allowed(p, new_mask, flags);
2887
/*
 * For a user-requested affinity change, detach the saved user mask and
 * take ownership of it; it is freed below once the task has been moved.
 */
2888 if (flags & SCA_USER)
2889 user_mask = clear_user_cpus_ptr(p);
2890
2891 ret = affine_move_task(rq, p, rf, dest_cpu, flags);
2892
/*
 * NOTE(review): this kfree() is the faulting call in the dmesg above —
 * __slab_free() found the object already freed. That implies some other
 * path freed the same p->user_cpus_ptr allocation concurrently (the
 * report says this affects 32-bit / compat tasks) — TODO: confirm which
 * second path frees the mask without serializing against this one.
 */
2893 kfree(user_mask);
2894
2895 return ret;
2896
/*
 * NOTE(review): excerpt is truncated — the "out:" label targeted by the
 * goto statements above (which unlocks rq/pi_lock in the upstream source)
 * and the closing brace are not shown in this paste.
 */
/*
 * NOTE(review): helper referenced by __set_cpus_allowed_ptr_locked() above.
 * Detaches the task's saved user-requested affinity mask: swap() leaves
 * p->user_cpus_ptr = NULL and returns the old pointer (possibly NULL).
 * Ownership transfers to the caller, which is expected to kfree() it.
 * The crash above indicates two contexts can end up owning (and freeing)
 * the same mask — nothing in this helper alone provides that exclusion.
 */
2582 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2583 {
2584 struct cpumask *user_mask = NULL;
2585
2586 swap(p->user_cpus_ptr, user_mask);
2587
2588 return user_mask;
2589 }
2590
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment